Revert "[promises] Convert call to a party" (#32651)
Reverts grpc/grpc#32359
diff --git a/BUILD b/BUILD
index 5b1e695..7ada528 100644
--- a/BUILD
+++ b/BUILD
@@ -699,7 +699,6 @@
external_deps = [
"absl/base",
"absl/base:core_headers",
- "absl/functional:any_invocable",
"absl/memory",
"absl/random",
"absl/status",
@@ -1310,7 +1309,6 @@
"//src/core:lib/transport/timeout_encoding.cc",
"//src/core:lib/transport/transport.cc",
"//src/core:lib/transport/transport_op_string.cc",
- "//src/core:lib/transport/batch_builder.cc",
] +
# TODO(vigneshbabu): remove these
# These headers used to be vended by this target, but they have to be
@@ -1402,7 +1400,6 @@
"//src/core:lib/transport/timeout_encoding.h",
"//src/core:lib/transport/transport.h",
"//src/core:lib/transport/transport_impl.h",
- "//src/core:lib/transport/batch_builder.h",
] +
# TODO(vigneshbabu): remove these
# These headers used to be vended by this target, but they have to be
@@ -1459,7 +1456,6 @@
"stats",
"uri_parser",
"work_serializer",
- "//src/core:1999",
"//src/core:activity",
"//src/core:arena",
"//src/core:arena_promise",
@@ -1487,19 +1483,15 @@
"//src/core:event_engine_trace",
"//src/core:event_log",
"//src/core:experiments",
- "//src/core:for_each",
"//src/core:gpr_atm",
"//src/core:gpr_manual_constructor",
"//src/core:gpr_spinlock",
"//src/core:grpc_sockaddr",
"//src/core:http2_errors",
- "//src/core:if",
"//src/core:init_internally",
"//src/core:iomgr_fwd",
"//src/core:iomgr_port",
"//src/core:json",
- "//src/core:latch",
- "//src/core:loop",
"//src/core:map",
"//src/core:match",
"//src/core:memory_quota",
@@ -1511,12 +1503,10 @@
"//src/core:pollset_set",
"//src/core:posix_event_engine_base_hdrs",
"//src/core:promise_status",
- "//src/core:race",
"//src/core:ref_counted",
"//src/core:resolved_address",
"//src/core:resource_quota",
"//src/core:resource_quota_trace",
- "//src/core:seq",
"//src/core:slice",
"//src/core:slice_buffer",
"//src/core:slice_cast",
@@ -2349,7 +2339,6 @@
grpc_cc_library(
name = "promise",
external_deps = [
- "absl/functional:any_invocable",
"absl/status",
"absl/types:optional",
],
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8e54265..8ae6cb9 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1069,6 +1069,7 @@
add_dependencies(buildtests_cxx nonblocking_test)
add_dependencies(buildtests_cxx notification_test)
add_dependencies(buildtests_cxx num_external_connectivity_watchers_test)
+ add_dependencies(buildtests_cxx observable_test)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_cxx oracle_event_engine_posix_test)
endif()
@@ -1638,7 +1639,6 @@
${_gRPC_ALLTARGETS_LIBRARIES}
absl::base
absl::core_headers
- absl::any_invocable
absl::memory
absl::random_random
absl::status
@@ -2312,7 +2312,6 @@
src/core/lib/load_balancing/lb_policy_registry.cc
src/core/lib/matchers/matchers.cc
src/core/lib/promise/activity.cc
- src/core/lib/promise/party.cc
src/core/lib/promise/sleep.cc
src/core/lib/promise/trace.cc
src/core/lib/resolver/resolver.cc
@@ -2416,7 +2415,6 @@
src/core/lib/surface/server.cc
src/core/lib/surface/validate_metadata.cc
src/core/lib/surface/version.cc
- src/core/lib/transport/batch_builder.cc
src/core/lib/transport/bdp_estimator.cc
src/core/lib/transport/connectivity_state.cc
src/core/lib/transport/error_utils.cc
@@ -2508,6 +2506,7 @@
absl::flat_hash_map
absl::flat_hash_set
absl::inlined_vector
+ absl::any_invocable
absl::bind_front
absl::function_ref
absl::hash
@@ -3000,7 +2999,6 @@
src/core/lib/load_balancing/lb_policy.cc
src/core/lib/load_balancing/lb_policy_registry.cc
src/core/lib/promise/activity.cc
- src/core/lib/promise/party.cc
src/core/lib/promise/sleep.cc
src/core/lib/promise/trace.cc
src/core/lib/resolver/resolver.cc
@@ -3073,7 +3071,6 @@
src/core/lib/surface/server.cc
src/core/lib/surface/validate_metadata.cc
src/core/lib/surface/version.cc
- src/core/lib/transport/batch_builder.cc
src/core/lib/transport/bdp_estimator.cc
src/core/lib/transport/connectivity_state.cc
src/core/lib/transport/error_utils.cc
@@ -3141,6 +3138,7 @@
absl::flat_hash_map
absl::flat_hash_set
absl::inlined_vector
+ absl::any_invocable
absl::bind_front
absl::function_ref
absl::hash
@@ -4518,7 +4516,6 @@
src/core/lib/load_balancing/lb_policy_registry.cc
src/core/lib/matchers/matchers.cc
src/core/lib/promise/activity.cc
- src/core/lib/promise/party.cc
src/core/lib/promise/trace.cc
src/core/lib/resolver/resolver.cc
src/core/lib/resolver/resolver_registry.cc
@@ -4589,7 +4586,6 @@
src/core/lib/surface/server.cc
src/core/lib/surface/validate_metadata.cc
src/core/lib/surface/version.cc
- src/core/lib/transport/batch_builder.cc
src/core/lib/transport/connectivity_state.cc
src/core/lib/transport/error_utils.cc
src/core/lib/transport/handshaker.cc
@@ -4648,6 +4644,7 @@
absl::flat_hash_map
absl::flat_hash_set
absl::inlined_vector
+ absl::any_invocable
absl::function_ref
absl::hash
absl::type_traits
@@ -5399,6 +5396,7 @@
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::algorithm_container
+ absl::any_invocable
absl::span
${_gRPC_BENCHMARK_LIBRARIES}
gpr
@@ -8344,6 +8342,7 @@
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::flat_hash_set
+ absl::any_invocable
absl::function_ref
absl::hash
absl::type_traits
@@ -9092,6 +9091,7 @@
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
+ absl::any_invocable
absl::statusor
gpr
)
@@ -10119,6 +10119,7 @@
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
+ absl::any_invocable
absl::type_traits
absl::statusor
gpr
@@ -10628,6 +10629,7 @@
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
+ absl::any_invocable
absl::hash
absl::type_traits
absl::statusor
@@ -11109,6 +11111,7 @@
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::flat_hash_set
+ absl::any_invocable
absl::function_ref
absl::hash
absl::type_traits
@@ -11179,6 +11182,7 @@
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::flat_hash_set
+ absl::any_invocable
absl::function_ref
absl::hash
absl::type_traits
@@ -11263,6 +11267,7 @@
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::flat_hash_set
+ absl::any_invocable
absl::statusor
gpr
)
@@ -11572,7 +11577,6 @@
src/core/lib/load_balancing/lb_policy.cc
src/core/lib/load_balancing/lb_policy_registry.cc
src/core/lib/promise/activity.cc
- src/core/lib/promise/party.cc
src/core/lib/promise/trace.cc
src/core/lib/resolver/resolver.cc
src/core/lib/resolver/resolver_registry.cc
@@ -11620,7 +11624,6 @@
src/core/lib/surface/server.cc
src/core/lib/surface/validate_metadata.cc
src/core/lib/surface/version.cc
- src/core/lib/transport/batch_builder.cc
src/core/lib/transport/connectivity_state.cc
src/core/lib/transport/error_utils.cc
src/core/lib/transport/handshaker_registry.cc
@@ -11665,6 +11668,7 @@
absl::flat_hash_map
absl::flat_hash_set
absl::inlined_vector
+ absl::any_invocable
absl::function_ref
absl::hash
absl::type_traits
@@ -14076,6 +14080,7 @@
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::flat_hash_set
+ absl::any_invocable
absl::function_ref
absl::hash
absl::type_traits
@@ -14865,6 +14870,7 @@
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::flat_hash_set
+ absl::any_invocable
absl::function_ref
absl::hash
absl::type_traits
@@ -15657,6 +15663,49 @@
endif()
if(gRPC_BUILD_TESTS)
+
+add_executable(observable_test
+ src/core/lib/promise/activity.cc
+ test/core/promise/observable_test.cc
+ third_party/googletest/googletest/src/gtest-all.cc
+ third_party/googletest/googlemock/src/gmock-all.cc
+)
+target_compile_features(observable_test PUBLIC cxx_std_14)
+target_include_directories(observable_test
+ PRIVATE
+ ${CMAKE_CURRENT_SOURCE_DIR}
+ ${CMAKE_CURRENT_SOURCE_DIR}/include
+ ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
+ ${_gRPC_RE2_INCLUDE_DIR}
+ ${_gRPC_SSL_INCLUDE_DIR}
+ ${_gRPC_UPB_GENERATED_DIR}
+ ${_gRPC_UPB_GRPC_GENERATED_DIR}
+ ${_gRPC_UPB_INCLUDE_DIR}
+ ${_gRPC_XXHASH_INCLUDE_DIR}
+ ${_gRPC_ZLIB_INCLUDE_DIR}
+ third_party/googletest/googletest/include
+ third_party/googletest/googletest
+ third_party/googletest/googlemock/include
+ third_party/googletest/googlemock
+ ${_gRPC_PROTO_GENS_DIR}
+)
+
+target_link_libraries(observable_test
+ ${_gRPC_BASELIB_LIBRARIES}
+ ${_gRPC_PROTOBUF_LIBRARIES}
+ ${_gRPC_ZLIB_LIBRARIES}
+ ${_gRPC_ALLTARGETS_LIBRARIES}
+ absl::flat_hash_set
+ absl::hash
+ absl::type_traits
+ absl::statusor
+ absl::utility
+ gpr
+)
+
+
+endif()
+if(gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_executable(oracle_event_engine_posix_test
@@ -16102,6 +16151,7 @@
if(gRPC_BUILD_TESTS)
add_executable(party_test
+ src/core/lib/promise/party.cc
test/core/promise/party_test.cc
third_party/googletest/googletest/src/gtest-all.cc
third_party/googletest/googlemock/src/gmock-all.cc
@@ -16221,6 +16271,7 @@
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
+ absl::any_invocable
absl::function_ref
absl::hash
absl::statusor
@@ -19004,6 +19055,7 @@
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
+ absl::any_invocable
absl::hash
absl::statusor
gpr
@@ -19483,6 +19535,7 @@
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
+ absl::any_invocable
absl::span
gpr
)
@@ -20312,6 +20365,7 @@
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
+ absl::any_invocable
absl::statusor
gpr
)
@@ -20354,6 +20408,7 @@
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
+ absl::any_invocable
absl::statusor
gpr
)
@@ -20402,6 +20457,7 @@
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::flat_hash_set
+ absl::any_invocable
absl::hash
absl::statusor
absl::utility
@@ -20517,6 +20573,7 @@
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
+ absl::any_invocable
absl::statusor
gpr
)
@@ -21008,6 +21065,7 @@
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::flat_hash_set
+ absl::any_invocable
absl::statusor
gpr
)
@@ -25960,7 +26018,7 @@
"gpr"
"gRPC platform support library"
"${gRPC_CORE_VERSION}"
- "absl_any_invocable absl_base absl_cord absl_core_headers absl_memory absl_optional absl_random_random absl_status absl_str_format absl_strings absl_synchronization absl_time absl_variant"
+ "absl_base absl_cord absl_core_headers absl_memory absl_optional absl_random_random absl_status absl_str_format absl_strings absl_synchronization absl_time absl_variant"
""
"-lgpr"
""
diff --git a/Makefile b/Makefile
index a3560aa..f571067 100644
--- a/Makefile
+++ b/Makefile
@@ -1561,7 +1561,6 @@
src/core/lib/load_balancing/lb_policy_registry.cc \
src/core/lib/matchers/matchers.cc \
src/core/lib/promise/activity.cc \
- src/core/lib/promise/party.cc \
src/core/lib/promise/sleep.cc \
src/core/lib/promise/trace.cc \
src/core/lib/resolver/resolver.cc \
@@ -1665,7 +1664,6 @@
src/core/lib/surface/server.cc \
src/core/lib/surface/validate_metadata.cc \
src/core/lib/surface/version.cc \
- src/core/lib/transport/batch_builder.cc \
src/core/lib/transport/bdp_estimator.cc \
src/core/lib/transport/connectivity_state.cc \
src/core/lib/transport/error_utils.cc \
@@ -2103,7 +2101,6 @@
src/core/lib/load_balancing/lb_policy.cc \
src/core/lib/load_balancing/lb_policy_registry.cc \
src/core/lib/promise/activity.cc \
- src/core/lib/promise/party.cc \
src/core/lib/promise/sleep.cc \
src/core/lib/promise/trace.cc \
src/core/lib/resolver/resolver.cc \
@@ -2176,7 +2173,6 @@
src/core/lib/surface/server.cc \
src/core/lib/surface/validate_metadata.cc \
src/core/lib/surface/version.cc \
- src/core/lib/transport/batch_builder.cc \
src/core/lib/transport/bdp_estimator.cc \
src/core/lib/transport/connectivity_state.cc \
src/core/lib/transport/error_utils.cc \
diff --git a/build_autogenerated.yaml b/build_autogenerated.yaml
index 517600a..577fbba 100644
--- a/build_autogenerated.yaml
+++ b/build_autogenerated.yaml
@@ -251,7 +251,6 @@
deps:
- absl/base:base
- absl/base:core_headers
- - absl/functional:any_invocable
- absl/memory:memory
- absl/random:random
- absl/status:status
@@ -951,13 +950,12 @@
- src/core/lib/promise/detail/status.h
- src/core/lib/promise/detail/switch.h
- src/core/lib/promise/exec_ctx_wakeup_scheduler.h
- - src/core/lib/promise/for_each.h
- src/core/lib/promise/if.h
- src/core/lib/promise/interceptor_list.h
+ - src/core/lib/promise/intra_activity_waiter.h
- src/core/lib/promise/latch.h
- src/core/lib/promise/loop.h
- src/core/lib/promise/map.h
- - src/core/lib/promise/party.h
- src/core/lib/promise/pipe.h
- src/core/lib/promise/poll.h
- src/core/lib/promise/promise.h
@@ -1061,7 +1059,6 @@
- src/core/lib/surface/lame_client.h
- src/core/lib/surface/server.h
- src/core/lib/surface/validate_metadata.h
- - src/core/lib/transport/batch_builder.h
- src/core/lib/transport/bdp_estimator.h
- src/core/lib/transport/connectivity_state.h
- src/core/lib/transport/error_utils.h
@@ -1713,7 +1710,6 @@
- src/core/lib/load_balancing/lb_policy_registry.cc
- src/core/lib/matchers/matchers.cc
- src/core/lib/promise/activity.cc
- - src/core/lib/promise/party.cc
- src/core/lib/promise/sleep.cc
- src/core/lib/promise/trace.cc
- src/core/lib/resolver/resolver.cc
@@ -1817,7 +1813,6 @@
- src/core/lib/surface/server.cc
- src/core/lib/surface/validate_metadata.cc
- src/core/lib/surface/version.cc
- - src/core/lib/transport/batch_builder.cc
- src/core/lib/transport/bdp_estimator.cc
- src/core/lib/transport/connectivity_state.cc
- src/core/lib/transport/error_utils.cc
@@ -1869,6 +1864,7 @@
- absl/container:flat_hash_map
- absl/container:flat_hash_set
- absl/container:inlined_vector
+ - absl/functional:any_invocable
- absl/functional:bind_front
- absl/functional:function_ref
- absl/hash:hash
@@ -2294,13 +2290,12 @@
- src/core/lib/promise/detail/status.h
- src/core/lib/promise/detail/switch.h
- src/core/lib/promise/exec_ctx_wakeup_scheduler.h
- - src/core/lib/promise/for_each.h
- src/core/lib/promise/if.h
- src/core/lib/promise/interceptor_list.h
+ - src/core/lib/promise/intra_activity_waiter.h
- src/core/lib/promise/latch.h
- src/core/lib/promise/loop.h
- src/core/lib/promise/map.h
- - src/core/lib/promise/party.h
- src/core/lib/promise/pipe.h
- src/core/lib/promise/poll.h
- src/core/lib/promise/promise.h
@@ -2375,7 +2370,6 @@
- src/core/lib/surface/lame_client.h
- src/core/lib/surface/server.h
- src/core/lib/surface/validate_metadata.h
- - src/core/lib/transport/batch_builder.h
- src/core/lib/transport/bdp_estimator.h
- src/core/lib/transport/connectivity_state.h
- src/core/lib/transport/error_utils.h
@@ -2669,7 +2663,6 @@
- src/core/lib/load_balancing/lb_policy.cc
- src/core/lib/load_balancing/lb_policy_registry.cc
- src/core/lib/promise/activity.cc
- - src/core/lib/promise/party.cc
- src/core/lib/promise/sleep.cc
- src/core/lib/promise/trace.cc
- src/core/lib/resolver/resolver.cc
@@ -2742,7 +2735,6 @@
- src/core/lib/surface/server.cc
- src/core/lib/surface/validate_metadata.cc
- src/core/lib/surface/version.cc
- - src/core/lib/transport/batch_builder.cc
- src/core/lib/transport/bdp_estimator.cc
- src/core/lib/transport/connectivity_state.cc
- src/core/lib/transport/error_utils.cc
@@ -2770,6 +2762,7 @@
- absl/container:flat_hash_map
- absl/container:flat_hash_set
- absl/container:inlined_vector
+ - absl/functional:any_invocable
- absl/functional:bind_front
- absl/functional:function_ref
- absl/hash:hash
@@ -3755,13 +3748,11 @@
- src/core/lib/promise/detail/status.h
- src/core/lib/promise/detail/switch.h
- src/core/lib/promise/exec_ctx_wakeup_scheduler.h
- - src/core/lib/promise/for_each.h
- src/core/lib/promise/if.h
- src/core/lib/promise/interceptor_list.h
- - src/core/lib/promise/latch.h
+ - src/core/lib/promise/intra_activity_waiter.h
- src/core/lib/promise/loop.h
- src/core/lib/promise/map.h
- - src/core/lib/promise/party.h
- src/core/lib/promise/pipe.h
- src/core/lib/promise/poll.h
- src/core/lib/promise/promise.h
@@ -3835,7 +3826,6 @@
- src/core/lib/surface/lame_client.h
- src/core/lib/surface/server.h
- src/core/lib/surface/validate_metadata.h
- - src/core/lib/transport/batch_builder.h
- src/core/lib/transport/connectivity_state.h
- src/core/lib/transport/error_utils.h
- src/core/lib/transport/handshaker.h
@@ -4013,7 +4003,6 @@
- src/core/lib/load_balancing/lb_policy_registry.cc
- src/core/lib/matchers/matchers.cc
- src/core/lib/promise/activity.cc
- - src/core/lib/promise/party.cc
- src/core/lib/promise/trace.cc
- src/core/lib/resolver/resolver.cc
- src/core/lib/resolver/resolver_registry.cc
@@ -4084,7 +4073,6 @@
- src/core/lib/surface/server.cc
- src/core/lib/surface/validate_metadata.cc
- src/core/lib/surface/version.cc
- - src/core/lib/transport/batch_builder.cc
- src/core/lib/transport/connectivity_state.cc
- src/core/lib/transport/error_utils.cc
- src/core/lib/transport/handshaker.cc
@@ -4104,6 +4092,7 @@
- absl/container:flat_hash_map
- absl/container:flat_hash_set
- absl/container:inlined_vector
+ - absl/functional:any_invocable
- absl/functional:function_ref
- absl/hash:hash
- absl/meta:type_traits
@@ -4385,6 +4374,7 @@
- test/core/client_channel/lb_policy/static_stride_scheduler_benchmark.cc
deps:
- absl/algorithm:container
+ - absl/functional:any_invocable
- absl/types:span
- benchmark
- gpr
@@ -5867,6 +5857,7 @@
- test/core/gprpp/chunked_vector_test.cc
deps:
- absl/container:flat_hash_set
+ - absl/functional:any_invocable
- absl/functional:function_ref
- absl/hash:hash
- absl/meta:type_traits
@@ -6154,6 +6145,7 @@
src:
- test/core/event_engine/common_closures_test.cc
deps:
+ - absl/functional:any_invocable
- absl/status:statusor
- gpr
- name: completion_queue_threading_test
@@ -6582,6 +6574,7 @@
- src/core/lib/surface/channel_stack_type.cc
- test/core/event_engine/endpoint_config_test.cc
deps:
+ - absl/functional:any_invocable
- absl/meta:type_traits
- absl/status:statusor
- gpr
@@ -6857,6 +6850,7 @@
- src/core/lib/slice/slice_string_helpers.cc
- test/core/promise/exec_ctx_wakeup_scheduler_test.cc
deps:
+ - absl/functional:any_invocable
- absl/hash:hash
- absl/meta:type_traits
- absl/status:statusor
@@ -7161,6 +7155,7 @@
- test/core/transport/chttp2/flow_control_test.cc
deps:
- absl/container:flat_hash_set
+ - absl/functional:any_invocable
- absl/functional:function_ref
- absl/hash:hash
- absl/meta:type_traits
@@ -7207,6 +7202,7 @@
- src/core/lib/promise/for_each.h
- src/core/lib/promise/if.h
- src/core/lib/promise/interceptor_list.h
+ - src/core/lib/promise/intra_activity_waiter.h
- src/core/lib/promise/join.h
- src/core/lib/promise/loop.h
- src/core/lib/promise/map.h
@@ -7258,6 +7254,7 @@
- test/core/promise/for_each_test.cc
deps:
- absl/container:flat_hash_set
+ - absl/functional:any_invocable
- absl/functional:function_ref
- absl/hash:hash
- absl/meta:type_traits
@@ -7291,6 +7288,7 @@
- test/core/event_engine/forkable_test.cc
deps:
- absl/container:flat_hash_set
+ - absl/functional:any_invocable
- absl/status:statusor
- gpr
- name: format_request_test
@@ -7559,13 +7557,11 @@
- src/core/lib/promise/detail/status.h
- src/core/lib/promise/detail/switch.h
- src/core/lib/promise/exec_ctx_wakeup_scheduler.h
- - src/core/lib/promise/for_each.h
- src/core/lib/promise/if.h
- src/core/lib/promise/interceptor_list.h
- - src/core/lib/promise/latch.h
+ - src/core/lib/promise/intra_activity_waiter.h
- src/core/lib/promise/loop.h
- src/core/lib/promise/map.h
- - src/core/lib/promise/party.h
- src/core/lib/promise/pipe.h
- src/core/lib/promise/poll.h
- src/core/lib/promise/promise.h
@@ -7616,7 +7612,6 @@
- src/core/lib/surface/lame_client.h
- src/core/lib/surface/server.h
- src/core/lib/surface/validate_metadata.h
- - src/core/lib/transport/batch_builder.h
- src/core/lib/transport/connectivity_state.h
- src/core/lib/transport/error_utils.h
- src/core/lib/transport/handshaker_factory.h
@@ -7800,7 +7795,6 @@
- src/core/lib/load_balancing/lb_policy.cc
- src/core/lib/load_balancing/lb_policy_registry.cc
- src/core/lib/promise/activity.cc
- - src/core/lib/promise/party.cc
- src/core/lib/promise/trace.cc
- src/core/lib/resolver/resolver.cc
- src/core/lib/resolver/resolver_registry.cc
@@ -7848,7 +7842,6 @@
- src/core/lib/surface/server.cc
- src/core/lib/surface/validate_metadata.cc
- src/core/lib/surface/version.cc
- - src/core/lib/transport/batch_builder.cc
- src/core/lib/transport/connectivity_state.cc
- src/core/lib/transport/error_utils.cc
- src/core/lib/transport/handshaker_registry.cc
@@ -7866,6 +7859,7 @@
- absl/container:flat_hash_map
- absl/container:flat_hash_set
- absl/container:inlined_vector
+ - absl/functional:any_invocable
- absl/functional:function_ref
- absl/hash:hash
- absl/meta:type_traits
@@ -8955,6 +8949,7 @@
- test/core/promise/interceptor_list_test.cc
deps:
- absl/container:flat_hash_set
+ - absl/functional:any_invocable
- absl/functional:function_ref
- absl/hash:hash
- absl/meta:type_traits
@@ -9168,6 +9163,7 @@
- src/core/lib/promise/detail/promise_like.h
- src/core/lib/promise/detail/status.h
- src/core/lib/promise/detail/switch.h
+ - src/core/lib/promise/intra_activity_waiter.h
- src/core/lib/promise/join.h
- src/core/lib/promise/latch.h
- src/core/lib/promise/poll.h
@@ -9310,6 +9306,7 @@
- src/core/lib/promise/for_each.h
- src/core/lib/promise/if.h
- src/core/lib/promise/interceptor_list.h
+ - src/core/lib/promise/intra_activity_waiter.h
- src/core/lib/promise/join.h
- src/core/lib/promise/loop.h
- src/core/lib/promise/map.h
@@ -9362,6 +9359,7 @@
- test/core/promise/map_pipe_test.cc
deps:
- absl/container:flat_hash_set
+ - absl/functional:any_invocable
- absl/functional:function_ref
- absl/hash:hash
- absl/meta:type_traits
@@ -9667,6 +9665,39 @@
- test/core/surface/num_external_connectivity_watchers_test.cc
deps:
- grpc_test_util
+- name: observable_test
+ gtest: true
+ build: test
+ language: c++
+ headers:
+ - src/core/lib/gprpp/atomic_utils.h
+ - src/core/lib/gprpp/orphanable.h
+ - src/core/lib/gprpp/ref_counted.h
+ - src/core/lib/gprpp/ref_counted_ptr.h
+ - src/core/lib/promise/activity.h
+ - src/core/lib/promise/context.h
+ - src/core/lib/promise/detail/basic_seq.h
+ - src/core/lib/promise/detail/promise_factory.h
+ - src/core/lib/promise/detail/promise_like.h
+ - src/core/lib/promise/detail/status.h
+ - src/core/lib/promise/detail/switch.h
+ - src/core/lib/promise/observable.h
+ - src/core/lib/promise/poll.h
+ - src/core/lib/promise/promise.h
+ - src/core/lib/promise/seq.h
+ - src/core/lib/promise/wait_set.h
+ - test/core/promise/test_wakeup_schedulers.h
+ src:
+ - src/core/lib/promise/activity.cc
+ - test/core/promise/observable_test.cc
+ deps:
+ - absl/container:flat_hash_set
+ - absl/hash:hash
+ - absl/meta:type_traits
+ - absl/status:statusor
+ - absl/utility:utility
+ - gpr
+ uses_polling: false
- name: oracle_event_engine_posix_test
gtest: true
build: test
@@ -9833,8 +9864,10 @@
gtest: true
build: test
language: c++
- headers: []
+ headers:
+ - src/core/lib/promise/party.h
src:
+ - src/core/lib/promise/party.cc
- test/core/promise/party_test.cc
deps:
- grpc_unsecure
@@ -9894,6 +9927,7 @@
- src/core/lib/slice/slice_string_helpers.cc
- test/core/resource_quota/periodic_update_test.cc
deps:
+ - absl/functional:any_invocable
- absl/functional:function_ref
- absl/hash:hash
- absl/status:statusor
@@ -10072,6 +10106,7 @@
- src/core/lib/promise/detail/promise_factory.h
- src/core/lib/promise/detail/promise_like.h
- src/core/lib/promise/poll.h
+ - src/core/lib/promise/promise.h
src:
- test/core/promise/promise_factory_test.cc
deps:
@@ -11054,6 +11089,7 @@
- src/core/lib/slice/slice_string_helpers.cc
- test/core/slice/slice_string_helpers_test.cc
deps:
+ - absl/functional:any_invocable
- absl/hash:hash
- absl/status:statusor
- gpr
@@ -11239,6 +11275,7 @@
- src/core/ext/filters/client_channel/lb_policy/weighted_round_robin/static_stride_scheduler.cc
- test/core/client_channel/lb_policy/static_stride_scheduler_test.cc
deps:
+ - absl/functional:any_invocable
- absl/types:span
- gpr
uses_polling: false
@@ -11643,6 +11680,7 @@
- src/core/lib/gprpp/time_averaged_stats.cc
- test/core/event_engine/posix/timer_heap_test.cc
deps:
+ - absl/functional:any_invocable
- absl/status:statusor
- gpr
uses_polling: false
@@ -11662,6 +11700,7 @@
- src/core/lib/gprpp/time_averaged_stats.cc
- test/core/event_engine/posix/timer_list_test.cc
deps:
+ - absl/functional:any_invocable
- absl/status:statusor
- gpr
uses_polling: false
@@ -11693,6 +11732,7 @@
- test/core/event_engine/slice_buffer_test.cc
deps:
- absl/container:flat_hash_set
+ - absl/functional:any_invocable
- absl/hash:hash
- absl/status:statusor
- absl/utility:utility
@@ -11727,6 +11767,7 @@
- src/core/lib/gprpp/time.cc
- test/core/gprpp/time_test.cc
deps:
+ - absl/functional:any_invocable
- absl/status:statusor
- gpr
uses_polling: false
@@ -11937,6 +11978,7 @@
- test/core/event_engine/thread_pool_test.cc
deps:
- absl/container:flat_hash_set
+ - absl/functional:any_invocable
- absl/status:statusor
- gpr
- name: thread_quota_test
diff --git a/config.m4 b/config.m4
index 410626e..92612e0 100644
--- a/config.m4
+++ b/config.m4
@@ -686,7 +686,6 @@
src/core/lib/load_balancing/lb_policy_registry.cc \
src/core/lib/matchers/matchers.cc \
src/core/lib/promise/activity.cc \
- src/core/lib/promise/party.cc \
src/core/lib/promise/sleep.cc \
src/core/lib/promise/trace.cc \
src/core/lib/resolver/resolver.cc \
@@ -790,7 +789,6 @@
src/core/lib/surface/server.cc \
src/core/lib/surface/validate_metadata.cc \
src/core/lib/surface/version.cc \
- src/core/lib/transport/batch_builder.cc \
src/core/lib/transport/bdp_estimator.cc \
src/core/lib/transport/connectivity_state.cc \
src/core/lib/transport/error_utils.cc \
diff --git a/config.w32 b/config.w32
index d20340a..6da67e3 100644
--- a/config.w32
+++ b/config.w32
@@ -652,7 +652,6 @@
"src\\core\\lib\\load_balancing\\lb_policy_registry.cc " +
"src\\core\\lib\\matchers\\matchers.cc " +
"src\\core\\lib\\promise\\activity.cc " +
- "src\\core\\lib\\promise\\party.cc " +
"src\\core\\lib\\promise\\sleep.cc " +
"src\\core\\lib\\promise\\trace.cc " +
"src\\core\\lib\\resolver\\resolver.cc " +
@@ -756,7 +755,6 @@
"src\\core\\lib\\surface\\server.cc " +
"src\\core\\lib\\surface\\validate_metadata.cc " +
"src\\core\\lib\\surface\\version.cc " +
- "src\\core\\lib\\transport\\batch_builder.cc " +
"src\\core\\lib\\transport\\bdp_estimator.cc " +
"src\\core\\lib\\transport\\connectivity_state.cc " +
"src\\core\\lib\\transport\\error_utils.cc " +
diff --git a/gRPC-C++.podspec b/gRPC-C++.podspec
index 8d4a012..5656c0b 100644
--- a/gRPC-C++.podspec
+++ b/gRPC-C++.podspec
@@ -920,13 +920,12 @@
'src/core/lib/promise/detail/status.h',
'src/core/lib/promise/detail/switch.h',
'src/core/lib/promise/exec_ctx_wakeup_scheduler.h',
- 'src/core/lib/promise/for_each.h',
'src/core/lib/promise/if.h',
'src/core/lib/promise/interceptor_list.h',
+ 'src/core/lib/promise/intra_activity_waiter.h',
'src/core/lib/promise/latch.h',
'src/core/lib/promise/loop.h',
'src/core/lib/promise/map.h',
- 'src/core/lib/promise/party.h',
'src/core/lib/promise/pipe.h',
'src/core/lib/promise/poll.h',
'src/core/lib/promise/promise.h',
@@ -1030,7 +1029,6 @@
'src/core/lib/surface/lame_client.h',
'src/core/lib/surface/server.h',
'src/core/lib/surface/validate_metadata.h',
- 'src/core/lib/transport/batch_builder.h',
'src/core/lib/transport/bdp_estimator.h',
'src/core/lib/transport/connectivity_state.h',
'src/core/lib/transport/error_utils.h',
@@ -1857,13 +1855,12 @@
'src/core/lib/promise/detail/status.h',
'src/core/lib/promise/detail/switch.h',
'src/core/lib/promise/exec_ctx_wakeup_scheduler.h',
- 'src/core/lib/promise/for_each.h',
'src/core/lib/promise/if.h',
'src/core/lib/promise/interceptor_list.h',
+ 'src/core/lib/promise/intra_activity_waiter.h',
'src/core/lib/promise/latch.h',
'src/core/lib/promise/loop.h',
'src/core/lib/promise/map.h',
- 'src/core/lib/promise/party.h',
'src/core/lib/promise/pipe.h',
'src/core/lib/promise/poll.h',
'src/core/lib/promise/promise.h',
@@ -1967,7 +1964,6 @@
'src/core/lib/surface/lame_client.h',
'src/core/lib/surface/server.h',
'src/core/lib/surface/validate_metadata.h',
- 'src/core/lib/transport/batch_builder.h',
'src/core/lib/transport/bdp_estimator.h',
'src/core/lib/transport/connectivity_state.h',
'src/core/lib/transport/error_utils.h',
diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec
index 90f1bc1..5f1b196 100644
--- a/gRPC-Core.podspec
+++ b/gRPC-Core.podspec
@@ -1495,14 +1495,12 @@
'src/core/lib/promise/detail/status.h',
'src/core/lib/promise/detail/switch.h',
'src/core/lib/promise/exec_ctx_wakeup_scheduler.h',
- 'src/core/lib/promise/for_each.h',
'src/core/lib/promise/if.h',
'src/core/lib/promise/interceptor_list.h',
+ 'src/core/lib/promise/intra_activity_waiter.h',
'src/core/lib/promise/latch.h',
'src/core/lib/promise/loop.h',
'src/core/lib/promise/map.h',
- 'src/core/lib/promise/party.cc',
- 'src/core/lib/promise/party.h',
'src/core/lib/promise/pipe.h',
'src/core/lib/promise/poll.h',
'src/core/lib/promise/promise.h',
@@ -1709,8 +1707,6 @@
'src/core/lib/surface/validate_metadata.cc',
'src/core/lib/surface/validate_metadata.h',
'src/core/lib/surface/version.cc',
- 'src/core/lib/transport/batch_builder.cc',
- 'src/core/lib/transport/batch_builder.h',
'src/core/lib/transport/bdp_estimator.cc',
'src/core/lib/transport/bdp_estimator.h',
'src/core/lib/transport/connectivity_state.cc',
@@ -2548,13 +2544,12 @@
'src/core/lib/promise/detail/status.h',
'src/core/lib/promise/detail/switch.h',
'src/core/lib/promise/exec_ctx_wakeup_scheduler.h',
- 'src/core/lib/promise/for_each.h',
'src/core/lib/promise/if.h',
'src/core/lib/promise/interceptor_list.h',
+ 'src/core/lib/promise/intra_activity_waiter.h',
'src/core/lib/promise/latch.h',
'src/core/lib/promise/loop.h',
'src/core/lib/promise/map.h',
- 'src/core/lib/promise/party.h',
'src/core/lib/promise/pipe.h',
'src/core/lib/promise/poll.h',
'src/core/lib/promise/promise.h',
@@ -2658,7 +2653,6 @@
'src/core/lib/surface/lame_client.h',
'src/core/lib/surface/server.h',
'src/core/lib/surface/validate_metadata.h',
- 'src/core/lib/transport/batch_builder.h',
'src/core/lib/transport/bdp_estimator.h',
'src/core/lib/transport/connectivity_state.h',
'src/core/lib/transport/error_utils.h',
diff --git a/grpc.gemspec b/grpc.gemspec
index 0fca512..fdbd975 100644
--- a/grpc.gemspec
+++ b/grpc.gemspec
@@ -1404,14 +1404,12 @@
s.files += %w( src/core/lib/promise/detail/status.h )
s.files += %w( src/core/lib/promise/detail/switch.h )
s.files += %w( src/core/lib/promise/exec_ctx_wakeup_scheduler.h )
- s.files += %w( src/core/lib/promise/for_each.h )
s.files += %w( src/core/lib/promise/if.h )
s.files += %w( src/core/lib/promise/interceptor_list.h )
+ s.files += %w( src/core/lib/promise/intra_activity_waiter.h )
s.files += %w( src/core/lib/promise/latch.h )
s.files += %w( src/core/lib/promise/loop.h )
s.files += %w( src/core/lib/promise/map.h )
- s.files += %w( src/core/lib/promise/party.cc )
- s.files += %w( src/core/lib/promise/party.h )
s.files += %w( src/core/lib/promise/pipe.h )
s.files += %w( src/core/lib/promise/poll.h )
s.files += %w( src/core/lib/promise/promise.h )
@@ -1618,8 +1616,6 @@
s.files += %w( src/core/lib/surface/validate_metadata.cc )
s.files += %w( src/core/lib/surface/validate_metadata.h )
s.files += %w( src/core/lib/surface/version.cc )
- s.files += %w( src/core/lib/transport/batch_builder.cc )
- s.files += %w( src/core/lib/transport/batch_builder.h )
s.files += %w( src/core/lib/transport/bdp_estimator.cc )
s.files += %w( src/core/lib/transport/bdp_estimator.h )
s.files += %w( src/core/lib/transport/connectivity_state.cc )
diff --git a/grpc.gyp b/grpc.gyp
index eca8af8..cba8fd1 100644
--- a/grpc.gyp
+++ b/grpc.gyp
@@ -293,7 +293,6 @@
'dependencies': [
'absl/base:base',
'absl/base:core_headers',
- 'absl/functional:any_invocable',
'absl/memory:memory',
'absl/random:random',
'absl/status:status',
@@ -360,6 +359,7 @@
'absl/container:flat_hash_map',
'absl/container:flat_hash_set',
'absl/container:inlined_vector',
+ 'absl/functional:any_invocable',
'absl/functional:bind_front',
'absl/functional:function_ref',
'absl/hash:hash',
@@ -974,7 +974,6 @@
'src/core/lib/load_balancing/lb_policy_registry.cc',
'src/core/lib/matchers/matchers.cc',
'src/core/lib/promise/activity.cc',
- 'src/core/lib/promise/party.cc',
'src/core/lib/promise/sleep.cc',
'src/core/lib/promise/trace.cc',
'src/core/lib/resolver/resolver.cc',
@@ -1078,7 +1077,6 @@
'src/core/lib/surface/server.cc',
'src/core/lib/surface/validate_metadata.cc',
'src/core/lib/surface/version.cc',
- 'src/core/lib/transport/batch_builder.cc',
'src/core/lib/transport/bdp_estimator.cc',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/error_utils.cc',
@@ -1178,6 +1176,7 @@
'absl/container:flat_hash_map',
'absl/container:flat_hash_set',
'absl/container:inlined_vector',
+ 'absl/functional:any_invocable',
'absl/functional:bind_front',
'absl/functional:function_ref',
'absl/hash:hash',
@@ -1457,7 +1456,6 @@
'src/core/lib/load_balancing/lb_policy.cc',
'src/core/lib/load_balancing/lb_policy_registry.cc',
'src/core/lib/promise/activity.cc',
- 'src/core/lib/promise/party.cc',
'src/core/lib/promise/sleep.cc',
'src/core/lib/promise/trace.cc',
'src/core/lib/resolver/resolver.cc',
@@ -1530,7 +1528,6 @@
'src/core/lib/surface/server.cc',
'src/core/lib/surface/validate_metadata.cc',
'src/core/lib/surface/version.cc',
- 'src/core/lib/transport/batch_builder.cc',
'src/core/lib/transport/bdp_estimator.cc',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/error_utils.cc',
@@ -1796,6 +1793,7 @@
'absl/container:flat_hash_map',
'absl/container:flat_hash_set',
'absl/container:inlined_vector',
+ 'absl/functional:any_invocable',
'absl/functional:function_ref',
'absl/hash:hash',
'absl/meta:type_traits',
@@ -1964,7 +1962,6 @@
'src/core/lib/load_balancing/lb_policy_registry.cc',
'src/core/lib/matchers/matchers.cc',
'src/core/lib/promise/activity.cc',
- 'src/core/lib/promise/party.cc',
'src/core/lib/promise/trace.cc',
'src/core/lib/resolver/resolver.cc',
'src/core/lib/resolver/resolver_registry.cc',
@@ -2035,7 +2032,6 @@
'src/core/lib/surface/server.cc',
'src/core/lib/surface/validate_metadata.cc',
'src/core/lib/surface/version.cc',
- 'src/core/lib/transport/batch_builder.cc',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/error_utils.cc',
'src/core/lib/transport/handshaker.cc',
diff --git a/package.xml b/package.xml
index 7d2616b..bf49b72 100644
--- a/package.xml
+++ b/package.xml
@@ -1386,14 +1386,12 @@
<file baseinstalldir="/" name="src/core/lib/promise/detail/status.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/detail/switch.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/exec_ctx_wakeup_scheduler.h" role="src" />
- <file baseinstalldir="/" name="src/core/lib/promise/for_each.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/if.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/interceptor_list.h" role="src" />
+ <file baseinstalldir="/" name="src/core/lib/promise/intra_activity_waiter.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/latch.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/loop.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/map.h" role="src" />
- <file baseinstalldir="/" name="src/core/lib/promise/party.cc" role="src" />
- <file baseinstalldir="/" name="src/core/lib/promise/party.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/pipe.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/poll.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/promise.h" role="src" />
@@ -1600,8 +1598,6 @@
<file baseinstalldir="/" name="src/core/lib/surface/validate_metadata.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/validate_metadata.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/version.cc" role="src" />
- <file baseinstalldir="/" name="src/core/lib/transport/batch_builder.cc" role="src" />
- <file baseinstalldir="/" name="src/core/lib/transport/batch_builder.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/bdp_estimator.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/bdp_estimator.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/connectivity_state.cc" role="src" />
diff --git a/src/core/BUILD b/src/core/BUILD
index 76b1b8d..dd2c2f5 100644
--- a/src/core/BUILD
+++ b/src/core/BUILD
@@ -413,6 +413,7 @@
],
external_deps = [
"absl/base:core_headers",
+ "absl/container:inlined_vector",
"absl/strings",
"absl/strings:str_format",
],
@@ -420,15 +421,9 @@
deps = [
"activity",
"arena",
- "construct_destruct",
- "context",
- "promise_factory",
"promise_trace",
- "//:debug_location",
- "//:exec_ctx",
"//:gpr",
"//:grpc_trace",
- "//:ref_counted_ptr",
],
)
@@ -576,7 +571,6 @@
"lib/promise/loop.h",
],
deps = [
- "construct_destruct",
"poll",
"promise_factory",
"//:gpr_platform",
@@ -702,7 +696,6 @@
external_deps = [
"absl/base:core_headers",
"absl/status",
- "absl/strings",
"absl/strings:str_format",
"absl/types:optional",
],
@@ -715,7 +708,6 @@
"construct_destruct",
"context",
"no_destruct",
- "poll",
"promise_factory",
"promise_status",
"//:gpr",
@@ -770,6 +762,19 @@
)
grpc_cc_library(
+ name = "intra_activity_waiter",
+ language = "c++",
+ public_hdrs = [
+ "lib/promise/intra_activity_waiter.h",
+ ],
+ deps = [
+ "activity",
+ "poll",
+ "//:gpr_platform",
+ ],
+)
+
+grpc_cc_library(
name = "latch",
external_deps = ["absl/strings"],
language = "c++",
@@ -778,6 +783,7 @@
],
deps = [
"activity",
+ "intra_activity_waiter",
"poll",
"promise_trace",
"//:gpr",
@@ -786,6 +792,25 @@
)
grpc_cc_library(
+ name = "observable",
+ external_deps = [
+ "absl/base:core_headers",
+ "absl/types:optional",
+ ],
+ language = "c++",
+ public_hdrs = [
+ "lib/promise/observable.h",
+ ],
+ deps = [
+ "activity",
+ "poll",
+ "promise_like",
+ "wait_set",
+ "//:gpr",
+ ],
+)
+
+grpc_cc_library(
name = "interceptor_list",
hdrs = [
"lib/promise/interceptor_list.h",
@@ -814,6 +839,7 @@
"lib/promise/pipe.h",
],
external_deps = [
+ "absl/base:core_headers",
"absl/strings",
"absl/types:optional",
"absl/types:variant",
@@ -825,6 +851,7 @@
"context",
"if",
"interceptor_list",
+ "intra_activity_waiter",
"map",
"poll",
"promise_trace",
@@ -3452,38 +3479,33 @@
"ext/filters/message_size/message_size_filter.h",
],
external_deps = [
- "absl/status:statusor",
+ "absl/status",
"absl/strings",
"absl/strings:str_format",
"absl/types:optional",
],
language = "c++",
deps = [
- "activity",
- "arena",
- "arena_promise",
"channel_args",
"channel_fwd",
"channel_init",
"channel_stack_type",
- "context",
+ "closure",
+ "error",
"grpc_service_config",
"json",
"json_args",
"json_object_loader",
- "latch",
- "poll",
- "race",
"service_config_parser",
- "slice",
"slice_buffer",
+ "status_helper",
"validation_errors",
"//:channel_stack_builder",
"//:config",
+ "//:debug_location",
"//:gpr",
"//:grpc_base",
"//:grpc_public_hdrs",
- "//:grpc_trace",
],
)
diff --git a/src/core/ext/filters/client_channel/client_channel.h b/src/core/ext/filters/client_channel/client_channel.h
index 957a474..bfc35c2 100644
--- a/src/core/ext/filters/client_channel/client_channel.h
+++ b/src/core/ext/filters/client_channel/client_channel.h
@@ -363,7 +363,7 @@
// TODO(roth): As part of simplifying cancellation in the filter stack,
// this should no longer need to be ref-counted.
class ClientChannel::LoadBalancedCall
- : public InternallyRefCounted<LoadBalancedCall, UnrefCallDtor> {
+ : public InternallyRefCounted<LoadBalancedCall, kUnrefCallDtor> {
public:
LoadBalancedCall(
ClientChannel* chand, grpc_call_context_element* call_context,
diff --git a/src/core/ext/filters/client_channel/retry_filter.cc b/src/core/ext/filters/client_channel/retry_filter.cc
index 886633a..0778cf8 100644
--- a/src/core/ext/filters/client_channel/retry_filter.cc
+++ b/src/core/ext/filters/client_channel/retry_filter.cc
@@ -269,7 +269,7 @@
// We allocate one struct on the arena for each attempt at starting a
// batch on a given LB call.
class BatchData
- : public RefCounted<BatchData, PolymorphicRefCount, UnrefCallDtor> {
+ : public RefCounted<BatchData, PolymorphicRefCount, kUnrefCallDtor> {
public:
BatchData(RefCountedPtr<CallAttempt> call_attempt, int refcount,
bool set_on_complete);
@@ -648,7 +648,7 @@
// on_call_stack_destruction closure from the surface.
class RetryFilter::CallData::CallStackDestructionBarrier
: public RefCounted<CallStackDestructionBarrier, PolymorphicRefCount,
- UnrefCallDtor> {
+ kUnrefCallDtor> {
public:
CallStackDestructionBarrier() {}
diff --git a/src/core/ext/filters/http/client/http_client_filter.cc b/src/core/ext/filters/http/client/http_client_filter.cc
index 89ff035..58f9a70 100644
--- a/src/core/ext/filters/http/client/http_client_filter.cc
+++ b/src/core/ext/filters/http/client/http_client_filter.cc
@@ -133,13 +133,13 @@
return std::move(md);
});
- return Race(initial_metadata_err->Wait(),
- Map(next_promise_factory(std::move(call_args)),
+ return Race(Map(next_promise_factory(std::move(call_args)),
[](ServerMetadataHandle md) -> ServerMetadataHandle {
auto r = CheckServerMetadata(md.get());
if (!r.ok()) return ServerMetadataFromStatus(r);
return md;
- }));
+ }),
+ initial_metadata_err->Wait());
}
HttpClientFilter::HttpClientFilter(HttpSchemeMetadata::ValueType scheme,
diff --git a/src/core/ext/filters/http/message_compress/compression_filter.cc b/src/core/ext/filters/http/message_compress/compression_filter.cc
index aea371d..a719f5b 100644
--- a/src/core/ext/filters/http/message_compress/compression_filter.cc
+++ b/src/core/ext/filters/http/message_compress/compression_filter.cc
@@ -252,7 +252,7 @@
return CompressMessage(std::move(message), compression_algorithm);
});
auto* decompress_args = GetContext<Arena>()->New<DecompressArgs>(
- DecompressArgs{GRPC_COMPRESS_ALGORITHMS_COUNT, absl::nullopt});
+ DecompressArgs{GRPC_COMPRESS_NONE, absl::nullopt});
auto* decompress_err =
GetContext<Arena>()->New<Latch<ServerMetadataHandle>>();
call_args.server_initial_metadata->InterceptAndMap(
@@ -273,8 +273,8 @@
return std::move(*r);
});
// Run the next filter, and race it with getting an error from decompression.
- return Race(decompress_err->Wait(),
- next_promise_factory(std::move(call_args)));
+ return Race(next_promise_factory(std::move(call_args)),
+ decompress_err->Wait());
}
ArenaPromise<ServerMetadataHandle> ServerCompressionFilter::MakeCallPromise(
@@ -288,8 +288,7 @@
this](MessageHandle message) -> absl::optional<MessageHandle> {
auto r = DecompressMessage(std::move(message), decompress_args);
if (grpc_call_trace.enabled()) {
- gpr_log(GPR_DEBUG, "%s[compression] DecompressMessage returned %s",
- Activity::current()->DebugTag().c_str(),
+ gpr_log(GPR_DEBUG, "DecompressMessage returned %s",
r.status().ToString().c_str());
}
if (!r.ok()) {
@@ -315,9 +314,13 @@
this](MessageHandle message) -> absl::optional<MessageHandle> {
return CompressMessage(std::move(message), *compression_algorithm);
});
- // Run the next filter, and race it with getting an error from decompression.
- return Race(decompress_err->Wait(),
- next_promise_factory(std::move(call_args)));
+ // Concurrently:
+ // - call the next filter
+ // - decompress incoming messages
+ // - wait for initial metadata to be sent, and then commence compression of
+ // outgoing messages
+ return Race(next_promise_factory(std::move(call_args)),
+ decompress_err->Wait());
}
} // namespace grpc_core
diff --git a/src/core/ext/filters/message_size/message_size_filter.cc b/src/core/ext/filters/message_size/message_size_filter.cc
index 6143239..33ff178 100644
--- a/src/core/ext/filters/message_size/message_size_filter.cc
+++ b/src/core/ext/filters/message_size/message_size_filter.cc
@@ -18,13 +18,10 @@
#include "src/core/ext/filters/message_size/message_size_filter.h"
-#include <inttypes.h>
-
-#include <functional>
#include <initializer_list>
-#include <string>
-#include <utility>
+#include <new>
+#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include <grpc/grpc.h>
@@ -35,22 +32,21 @@
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/channel/channel_stack_builder.h"
#include "src/core/lib/config/core_configuration.h"
-#include "src/core/lib/debug/trace.h"
-#include "src/core/lib/promise/activity.h"
-#include "src/core/lib/promise/context.h"
-#include "src/core/lib/promise/latch.h"
-#include "src/core/lib/promise/poll.h"
-#include "src/core/lib/promise/race.h"
-#include "src/core/lib/resource_quota/arena.h"
+#include "src/core/lib/gprpp/debug_location.h"
+#include "src/core/lib/gprpp/status_helper.h"
+#include "src/core/lib/iomgr/call_combiner.h"
+#include "src/core/lib/iomgr/closure.h"
+#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/service_config/service_config_call_data.h"
-#include "src/core/lib/slice/slice.h"
#include "src/core/lib/slice/slice_buffer.h"
-#include "src/core/lib/surface/call_trace.h"
#include "src/core/lib/surface/channel_init.h"
#include "src/core/lib/surface/channel_stack_type.h"
-#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h"
+static void recv_message_ready(void* user_data, grpc_error_handle error);
+static void recv_trailing_metadata_ready(void* user_data,
+ grpc_error_handle error);
+
namespace grpc_core {
//
@@ -128,164 +124,251 @@
parser_name());
}
-//
-// MessageSizeFilter
-//
-
-const grpc_channel_filter ClientMessageSizeFilter::kFilter =
- MakePromiseBasedFilter<ClientMessageSizeFilter, FilterEndpoint::kClient,
- kFilterExaminesOutboundMessages |
- kFilterExaminesInboundMessages>("message_size");
-const grpc_channel_filter ServerMessageSizeFilter::kFilter =
- MakePromiseBasedFilter<ServerMessageSizeFilter, FilterEndpoint::kServer,
- kFilterExaminesOutboundMessages |
- kFilterExaminesInboundMessages>("message_size");
-
-class MessageSizeFilter::CallBuilder {
- private:
- auto Interceptor(uint32_t max_length, bool is_send) {
- return [max_length, is_send,
- err = err_](MessageHandle msg) -> absl::optional<MessageHandle> {
- if (grpc_call_trace.enabled()) {
- gpr_log(GPR_INFO, "%s[message_size] %s len:%" PRIdPTR " max:%d",
- Activity::current()->DebugTag().c_str(),
- is_send ? "send" : "recv", msg->payload()->Length(),
- max_length);
- }
- if (msg->payload()->Length() > max_length) {
- if (err->is_set()) return std::move(msg);
- auto r = GetContext<Arena>()->MakePooled<ServerMetadata>(
- GetContext<Arena>());
- r->Set(GrpcStatusMetadata(), GRPC_STATUS_RESOURCE_EXHAUSTED);
- r->Set(GrpcMessageMetadata(),
- Slice::FromCopiedString(
- absl::StrFormat("%s message larger than max (%u vs. %d)",
- is_send ? "Sent" : "Received",
- msg->payload()->Length(), max_length)));
- err->Set(std::move(r));
- return absl::nullopt;
- }
- return std::move(msg);
- };
- }
-
- public:
- explicit CallBuilder(const MessageSizeParsedConfig& limits)
- : limits_(limits) {}
-
- template <typename T>
- void AddSend(T* pipe_end) {
- if (!limits_.max_send_size().has_value()) return;
- pipe_end->InterceptAndMap(Interceptor(*limits_.max_send_size(), true));
- }
- template <typename T>
- void AddRecv(T* pipe_end) {
- if (!limits_.max_recv_size().has_value()) return;
- pipe_end->InterceptAndMap(Interceptor(*limits_.max_recv_size(), false));
- }
-
- ArenaPromise<ServerMetadataHandle> Run(
- CallArgs call_args, NextPromiseFactory next_promise_factory) {
- return Race(err_->Wait(), next_promise_factory(std::move(call_args)));
- }
-
- private:
- Latch<ServerMetadataHandle>* const err_ =
- GetContext<Arena>()->ManagedNew<Latch<ServerMetadataHandle>>();
- MessageSizeParsedConfig limits_;
-};
-
-absl::StatusOr<ClientMessageSizeFilter> ClientMessageSizeFilter::Create(
- const ChannelArgs& args, ChannelFilter::Args) {
- return ClientMessageSizeFilter(args);
-}
-
-absl::StatusOr<ServerMessageSizeFilter> ServerMessageSizeFilter::Create(
- const ChannelArgs& args, ChannelFilter::Args) {
- return ServerMessageSizeFilter(args);
-}
-
-ArenaPromise<ServerMetadataHandle> ClientMessageSizeFilter::MakeCallPromise(
- CallArgs call_args, NextPromiseFactory next_promise_factory) {
- // Get max sizes from channel data, then merge in per-method config values.
- // Note: Per-method config is only available on the client, so we
- // apply the max request size to the send limit and the max response
- // size to the receive limit.
- MessageSizeParsedConfig limits = this->limits();
- const MessageSizeParsedConfig* config_from_call_context =
- MessageSizeParsedConfig::GetFromCallContext(
- GetContext<grpc_call_context_element>(),
- service_config_parser_index_);
- if (config_from_call_context != nullptr) {
- absl::optional<uint32_t> max_send_size = limits.max_send_size();
- absl::optional<uint32_t> max_recv_size = limits.max_recv_size();
- if (config_from_call_context->max_send_size().has_value() &&
- (!max_send_size.has_value() ||
- *config_from_call_context->max_send_size() < *max_send_size)) {
- max_send_size = *config_from_call_context->max_send_size();
- }
- if (config_from_call_context->max_recv_size().has_value() &&
- (!max_recv_size.has_value() ||
- *config_from_call_context->max_recv_size() < *max_recv_size)) {
- max_recv_size = *config_from_call_context->max_recv_size();
- }
- limits = MessageSizeParsedConfig(max_send_size, max_recv_size);
- }
-
- CallBuilder b(limits);
- b.AddSend(call_args.client_to_server_messages);
- b.AddRecv(call_args.server_to_client_messages);
- return b.Run(std::move(call_args), std::move(next_promise_factory));
-}
-
-ArenaPromise<ServerMetadataHandle> ServerMessageSizeFilter::MakeCallPromise(
- CallArgs call_args, NextPromiseFactory next_promise_factory) {
- CallBuilder b(limits());
- b.AddSend(call_args.server_to_client_messages);
- b.AddRecv(call_args.client_to_server_messages);
- return b.Run(std::move(call_args), std::move(next_promise_factory));
-}
+} // namespace grpc_core
namespace {
+struct channel_data {
+ grpc_core::MessageSizeParsedConfig limits;
+ const size_t service_config_parser_index{
+ grpc_core::MessageSizeParser::ParserIndex()};
+};
+
+struct call_data {
+ call_data(grpc_call_element* elem, const channel_data& chand,
+ const grpc_call_element_args& args)
+ : call_combiner(args.call_combiner), limits(chand.limits) {
+ GRPC_CLOSURE_INIT(&recv_message_ready, ::recv_message_ready, elem,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready,
+ ::recv_trailing_metadata_ready, elem,
+ grpc_schedule_on_exec_ctx);
+ // Get max sizes from channel data, then merge in per-method config values.
+ // Note: Per-method config is only available on the client, so we
+ // apply the max request size to the send limit and the max response
+ // size to the receive limit.
+ const grpc_core::MessageSizeParsedConfig* config_from_call_context =
+ grpc_core::MessageSizeParsedConfig::GetFromCallContext(
+ args.context, chand.service_config_parser_index);
+ if (config_from_call_context != nullptr) {
+ absl::optional<uint32_t> max_send_size = limits.max_send_size();
+ absl::optional<uint32_t> max_recv_size = limits.max_recv_size();
+ if (config_from_call_context->max_send_size().has_value() &&
+ (!max_send_size.has_value() ||
+ *config_from_call_context->max_send_size() < *max_send_size)) {
+ max_send_size = *config_from_call_context->max_send_size();
+ }
+ if (config_from_call_context->max_recv_size().has_value() &&
+ (!max_recv_size.has_value() ||
+ *config_from_call_context->max_recv_size() < *max_recv_size)) {
+ max_recv_size = *config_from_call_context->max_recv_size();
+ }
+ limits = grpc_core::MessageSizeParsedConfig(max_send_size, max_recv_size);
+ }
+ }
+
+ ~call_data() {}
+
+ grpc_core::CallCombiner* call_combiner;
+ grpc_core::MessageSizeParsedConfig limits;
+ // Receive closures are chained: we inject this closure as the
+ // recv_message_ready up-call on transport_stream_op, and remember to
+ // call our next_recv_message_ready member after handling it.
+ grpc_closure recv_message_ready;
+ grpc_closure recv_trailing_metadata_ready;
+ // The error caused by a message that is too large, or absl::OkStatus()
+ grpc_error_handle error;
+ // Used by recv_message_ready.
+ absl::optional<grpc_core::SliceBuffer>* recv_message = nullptr;
+ // Original recv_message_ready callback, invoked after our own.
+ grpc_closure* next_recv_message_ready = nullptr;
+ // Original recv_trailing_metadata callback, invoked after our own.
+ grpc_closure* original_recv_trailing_metadata_ready;
+ bool seen_recv_trailing_metadata = false;
+ grpc_error_handle recv_trailing_metadata_error;
+};
+
+} // namespace
+
+// Callback invoked when we receive a message. Here we check the max
+// receive message size.
+static void recv_message_ready(void* user_data, grpc_error_handle error) {
+ grpc_call_element* elem = static_cast<grpc_call_element*>(user_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ if (calld->recv_message->has_value() &&
+ calld->limits.max_recv_size().has_value() &&
+ (*calld->recv_message)->Length() >
+ static_cast<size_t>(*calld->limits.max_recv_size())) {
+ grpc_error_handle new_error = grpc_error_set_int(
+ GRPC_ERROR_CREATE(absl::StrFormat(
+ "Received message larger than max (%u vs. %d)",
+ (*calld->recv_message)->Length(), *calld->limits.max_recv_size())),
+ grpc_core::StatusIntProperty::kRpcStatus,
+ GRPC_STATUS_RESOURCE_EXHAUSTED);
+ error = grpc_error_add_child(error, new_error);
+ calld->error = error;
+ }
+ // Invoke the next callback.
+ grpc_closure* closure = calld->next_recv_message_ready;
+ calld->next_recv_message_ready = nullptr;
+ if (calld->seen_recv_trailing_metadata) {
+ // We might potentially see another RECV_MESSAGE op. In that case, we do not
+ // want to run the recv_trailing_metadata_ready closure again. The newer
+ // RECV_MESSAGE op cannot cause any errors since the transport has already
+ // invoked the recv_trailing_metadata_ready closure and all further
+ // RECV_MESSAGE ops will get null payloads.
+ calld->seen_recv_trailing_metadata = false;
+ GRPC_CALL_COMBINER_START(calld->call_combiner,
+ &calld->recv_trailing_metadata_ready,
+ calld->recv_trailing_metadata_error,
+ "continue recv_trailing_metadata_ready");
+ }
+ grpc_core::Closure::Run(DEBUG_LOCATION, closure, error);
+}
+
+// Callback invoked on completion of recv_trailing_metadata
+// Notifies the recv_trailing_metadata batch of any message size failures
+static void recv_trailing_metadata_ready(void* user_data,
+ grpc_error_handle error) {
+ grpc_call_element* elem = static_cast<grpc_call_element*>(user_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ if (calld->next_recv_message_ready != nullptr) {
+ calld->seen_recv_trailing_metadata = true;
+ calld->recv_trailing_metadata_error = error;
+ GRPC_CALL_COMBINER_STOP(calld->call_combiner,
+ "deferring recv_trailing_metadata_ready until "
+ "after recv_message_ready");
+ return;
+ }
+ error = grpc_error_add_child(error, calld->error);
+ // Invoke the next callback.
+ grpc_core::Closure::Run(DEBUG_LOCATION,
+ calld->original_recv_trailing_metadata_ready, error);
+}
+
+// Start transport stream op.
+static void message_size_start_transport_stream_op_batch(
+ grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ // Check max send message size.
+ if (op->send_message && calld->limits.max_send_size().has_value() &&
+ op->payload->send_message.send_message->Length() >
+ static_cast<size_t>(*calld->limits.max_send_size())) {
+ grpc_transport_stream_op_batch_finish_with_failure(
+ op,
+ grpc_error_set_int(GRPC_ERROR_CREATE(absl::StrFormat(
+ "Sent message larger than max (%u vs. %d)",
+ op->payload->send_message.send_message->Length(),
+ *calld->limits.max_send_size())),
+ grpc_core::StatusIntProperty::kRpcStatus,
+ GRPC_STATUS_RESOURCE_EXHAUSTED),
+ calld->call_combiner);
+ return;
+ }
+ // Inject callback for receiving a message.
+ if (op->recv_message) {
+ calld->next_recv_message_ready =
+ op->payload->recv_message.recv_message_ready;
+ calld->recv_message = op->payload->recv_message.recv_message;
+ op->payload->recv_message.recv_message_ready = &calld->recv_message_ready;
+ }
+ // Inject callback for receiving trailing metadata.
+ if (op->recv_trailing_metadata) {
+ calld->original_recv_trailing_metadata_ready =
+ op->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
+ op->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
+ &calld->recv_trailing_metadata_ready;
+ }
+ // Chain to the next filter.
+ grpc_call_next_op(elem, op);
+}
+
+// Constructor for call_data.
+static grpc_error_handle message_size_init_call_elem(
+ grpc_call_element* elem, const grpc_call_element_args* args) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ new (elem->call_data) call_data(elem, *chand, *args);
+ return absl::OkStatus();
+}
+
+// Destructor for call_data.
+static void message_size_destroy_call_elem(
+ grpc_call_element* elem, const grpc_call_final_info* /*final_info*/,
+ grpc_closure* /*ignored*/) {
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ calld->~call_data();
+}
+
+// Constructor for channel_data.
+static grpc_error_handle message_size_init_channel_elem(
+ grpc_channel_element* elem, grpc_channel_element_args* args) {
+ GPR_ASSERT(!args->is_last);
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ new (chand) channel_data();
+ chand->limits = grpc_core::MessageSizeParsedConfig::GetFromChannelArgs(
+ args->channel_args);
+ return absl::OkStatus();
+}
+
+// Destructor for channel_data.
+static void message_size_destroy_channel_elem(grpc_channel_element* elem) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ chand->~channel_data();
+}
+
+const grpc_channel_filter grpc_message_size_filter = {
+ message_size_start_transport_stream_op_batch,
+ nullptr,
+ grpc_channel_next_op,
+ sizeof(call_data),
+ message_size_init_call_elem,
+ grpc_call_stack_ignore_set_pollset_or_pollset_set,
+ message_size_destroy_call_elem,
+ sizeof(channel_data),
+ message_size_init_channel_elem,
+ grpc_channel_stack_no_post_init,
+ message_size_destroy_channel_elem,
+ grpc_channel_next_get_info,
+ "message_size"};
+
// Used for GRPC_CLIENT_SUBCHANNEL
-bool MaybeAddMessageSizeFilterToSubchannel(ChannelStackBuilder* builder) {
+static bool maybe_add_message_size_filter_subchannel(
+ grpc_core::ChannelStackBuilder* builder) {
if (builder->channel_args().WantMinimalStack()) {
return true;
}
- builder->PrependFilter(&ClientMessageSizeFilter::kFilter);
+ builder->PrependFilter(&grpc_message_size_filter);
return true;
}
-// Used for GRPC_CLIENT_DIRECT_CHANNEL and GRPC_SERVER_CHANNEL. Adds the
-// filter only if message size limits or service config is specified.
-auto MaybeAddMessageSizeFilter(const grpc_channel_filter* filter) {
- return [filter](ChannelStackBuilder* builder) {
- auto channel_args = builder->channel_args();
- if (channel_args.WantMinimalStack()) {
- return true;
- }
- MessageSizeParsedConfig limits =
- MessageSizeParsedConfig::GetFromChannelArgs(channel_args);
- const bool enable =
- limits.max_send_size().has_value() ||
- limits.max_recv_size().has_value() ||
- channel_args.GetString(GRPC_ARG_SERVICE_CONFIG).has_value();
- if (enable) builder->PrependFilter(filter);
+// Used for GRPC_CLIENT_DIRECT_CHANNEL and GRPC_SERVER_CHANNEL. Adds the filter
+// only if message size limits or service config is specified.
+static bool maybe_add_message_size_filter(
+ grpc_core::ChannelStackBuilder* builder) {
+ auto channel_args = builder->channel_args();
+ if (channel_args.WantMinimalStack()) {
return true;
- };
+ }
+ grpc_core::MessageSizeParsedConfig limits =
+ grpc_core::MessageSizeParsedConfig::GetFromChannelArgs(channel_args);
+ const bool enable =
+ limits.max_send_size().has_value() ||
+ limits.max_recv_size().has_value() ||
+ channel_args.GetString(GRPC_ARG_SERVICE_CONFIG).has_value();
+ if (enable) builder->PrependFilter(&grpc_message_size_filter);
+ return true;
}
-} // namespace
+namespace grpc_core {
void RegisterMessageSizeFilter(CoreConfiguration::Builder* builder) {
MessageSizeParser::Register(builder);
- builder->channel_init()->RegisterStage(GRPC_CLIENT_SUBCHANNEL,
+ builder->channel_init()->RegisterStage(
+ GRPC_CLIENT_SUBCHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
+ maybe_add_message_size_filter_subchannel);
+ builder->channel_init()->RegisterStage(GRPC_CLIENT_DIRECT_CHANNEL,
GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
- MaybeAddMessageSizeFilterToSubchannel);
- builder->channel_init()->RegisterStage(
- GRPC_CLIENT_DIRECT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
- MaybeAddMessageSizeFilter(&ClientMessageSizeFilter::kFilter));
- builder->channel_init()->RegisterStage(
- GRPC_SERVER_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
- MaybeAddMessageSizeFilter(&ServerMessageSizeFilter::kFilter));
+ maybe_add_message_size_filter);
+ builder->channel_init()->RegisterStage(GRPC_SERVER_CHANNEL,
+ GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
+ maybe_add_message_size_filter);
}
} // namespace grpc_core
diff --git a/src/core/ext/filters/message_size/message_size_filter.h b/src/core/ext/filters/message_size/message_size_filter.h
index 75135a1..e47485a 100644
--- a/src/core/ext/filters/message_size/message_size_filter.h
+++ b/src/core/ext/filters/message_size/message_size_filter.h
@@ -24,22 +24,21 @@
#include <memory>
-#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_fwd.h"
+#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/channel/context.h"
-#include "src/core/lib/channel/promise_based_filter.h"
#include "src/core/lib/config/core_configuration.h"
#include "src/core/lib/gprpp/validation_errors.h"
#include "src/core/lib/json/json.h"
#include "src/core/lib/json/json_args.h"
#include "src/core/lib/json/json_object_loader.h"
-#include "src/core/lib/promise/arena_promise.h"
#include "src/core/lib/service_config/service_config_parser.h"
-#include "src/core/lib/transport/transport.h"
+
+extern const grpc_channel_filter grpc_message_size_filter;
namespace grpc_core {
@@ -86,50 +85,6 @@
absl::optional<uint32_t> GetMaxRecvSizeFromChannelArgs(const ChannelArgs& args);
absl::optional<uint32_t> GetMaxSendSizeFromChannelArgs(const ChannelArgs& args);
-class MessageSizeFilter : public ChannelFilter {
- protected:
- explicit MessageSizeFilter(const ChannelArgs& args)
- : limits_(MessageSizeParsedConfig::GetFromChannelArgs(args)) {}
-
- class CallBuilder;
-
- const MessageSizeParsedConfig& limits() const { return limits_; }
-
- private:
- MessageSizeParsedConfig limits_;
-};
-
-class ServerMessageSizeFilter final : public MessageSizeFilter {
- public:
- static const grpc_channel_filter kFilter;
-
- static absl::StatusOr<ServerMessageSizeFilter> Create(
- const ChannelArgs& args, ChannelFilter::Args filter_args);
-
- // Construct a promise for one call.
- ArenaPromise<ServerMetadataHandle> MakeCallPromise(
- CallArgs call_args, NextPromiseFactory next_promise_factory) override;
-
- private:
- using MessageSizeFilter::MessageSizeFilter;
-};
-
-class ClientMessageSizeFilter final : public MessageSizeFilter {
- public:
- static const grpc_channel_filter kFilter;
-
- static absl::StatusOr<ClientMessageSizeFilter> Create(
- const ChannelArgs& args, ChannelFilter::Args filter_args);
-
- // Construct a promise for one call.
- ArenaPromise<ServerMetadataHandle> MakeCallPromise(
- CallArgs call_args, NextPromiseFactory next_promise_factory) override;
-
- private:
- const size_t service_config_parser_index_{MessageSizeParser::ParserIndex()};
- using MessageSizeFilter::MessageSizeFilter;
-};
-
} // namespace grpc_core
#endif // GRPC_SRC_CORE_EXT_FILTERS_MESSAGE_SIZE_MESSAGE_SIZE_FILTER_H
diff --git a/src/core/ext/transport/binder/transport/binder_transport.cc b/src/core/ext/transport/binder/transport/binder_transport.cc
index 0420e96..38ccbc6 100644
--- a/src/core/ext/transport/binder/transport/binder_transport.cc
+++ b/src/core/ext/transport/binder/transport/binder_transport.cc
@@ -694,7 +694,6 @@
// See grpc_transport_vtable declaration for meaning of each field
static const grpc_transport_vtable vtable = {sizeof(grpc_binder_stream),
- false,
"binder",
init_stream,
nullptr,
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
index 9bd3ed5..3261618 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
@@ -1210,8 +1210,7 @@
grpc_chttp2_stream* s,
grpc_closure** pclosure,
grpc_error_handle error,
- const char* desc,
- grpc_core::DebugLocation whence) {
+ const char* desc) {
grpc_closure* closure = *pclosure;
*pclosure = nullptr;
if (closure == nullptr) {
@@ -1222,14 +1221,14 @@
gpr_log(
GPR_INFO,
"complete_closure_step: t=%p %p refs=%d flags=0x%04x desc=%s err=%s "
- "write_state=%s whence=%s:%d",
+ "write_state=%s",
t, closure,
static_cast<int>(closure->next_data.scratch /
CLOSURE_BARRIER_FIRST_REF_BIT),
static_cast<int>(closure->next_data.scratch %
CLOSURE_BARRIER_FIRST_REF_BIT),
desc, grpc_core::StatusToString(error).c_str(),
- write_state_name(t->write_state), whence.file(), whence.line());
+ write_state_name(t->write_state));
}
if (s->context != nullptr) {
@@ -3079,7 +3078,6 @@
}
static const grpc_transport_vtable vtable = {sizeof(grpc_chttp2_stream),
- false,
"chttp2",
init_stream,
nullptr,
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index a89b003..8ad1a17 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -709,8 +709,7 @@
grpc_chttp2_stream* s,
grpc_closure** pclosure,
grpc_error_handle error,
- const char* desc,
- grpc_core::DebugLocation whence = {});
+ const char* desc);
#define GRPC_HEADER_SIZE_IN_BYTES 5
#define MAX_SIZE_T (~(size_t)0)
diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.cc b/src/core/ext/transport/cronet/transport/cronet_transport.cc
index e1dfbb3..77cf88b 100644
--- a/src/core/ext/transport/cronet/transport/cronet_transport.cc
+++ b/src/core/ext/transport/cronet/transport/cronet_transport.cc
@@ -1462,7 +1462,6 @@
static const grpc_transport_vtable grpc_cronet_vtable = {
sizeof(stream_obj),
- false,
"cronet_http",
init_stream,
nullptr,
diff --git a/src/core/ext/transport/inproc/inproc_transport.cc b/src/core/ext/transport/inproc/inproc_transport.cc
index b4185e4..dc6b480 100644
--- a/src/core/ext/transport/inproc/inproc_transport.cc
+++ b/src/core/ext/transport/inproc/inproc_transport.cc
@@ -408,7 +408,7 @@
int is_rtm = static_cast<int>(op == s->recv_trailing_md_op);
if ((is_sm + is_stm + is_rim + is_rm + is_rtm) == 1) {
- INPROC_LOG(GPR_INFO, "%s %p %p %p %s", msg, s, op, op->on_complete,
+ INPROC_LOG(GPR_INFO, "%s %p %p %s", msg, s, op,
grpc_core::StatusToString(error).c_str());
grpc_core::ExecCtx::Run(DEBUG_LOCATION, op->on_complete, error);
}
@@ -697,9 +697,8 @@
s->to_read_initial_md_filled = false;
grpc_core::ExecCtx::Run(
DEBUG_LOCATION,
- std::exchange(s->recv_initial_md_op->payload->recv_initial_metadata
- .recv_initial_metadata_ready,
- nullptr),
+ s->recv_initial_md_op->payload->recv_initial_metadata
+ .recv_initial_metadata_ready,
absl::OkStatus());
complete_if_batch_end_locked(
s, absl::OkStatus(), s->recv_initial_md_op,
@@ -767,8 +766,6 @@
nullptr);
s->to_read_trailing_md.Clear();
s->to_read_trailing_md_filled = false;
- s->recv_trailing_md_op->payload->recv_trailing_metadata
- .recv_trailing_metadata->Set(grpc_core::GrpcStatusFromWire(), true);
// We should schedule the recv_trailing_md_op completion if
// 1. this stream is the client-side
@@ -909,6 +906,8 @@
return ret;
}
+void do_nothing(void* /*arg*/, grpc_error_handle /*error*/) {}
+
void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
grpc_transport_stream_op_batch* op) {
INPROC_LOG(GPR_INFO, "perform_stream_op %p %p %p", gt, gs, op);
@@ -934,8 +933,8 @@
// completed). This can go away once we move to a new C++ closure API
// that provides the ability to create a barrier closure.
if (on_complete == nullptr) {
- on_complete = op->on_complete =
- grpc_core::NewClosure([](grpc_error_handle) {});
+ on_complete = GRPC_CLOSURE_INIT(&op->handler_private.closure, do_nothing,
+ nullptr, grpc_schedule_on_exec_ctx);
}
if (op->cancel_stream) {
@@ -1178,18 +1177,13 @@
grpc_endpoint* get_endpoint(grpc_transport* /*t*/) { return nullptr; }
-const grpc_transport_vtable inproc_vtable = {sizeof(inproc_stream),
- true,
- "inproc",
- init_stream,
- nullptr,
- set_pollset,
- set_pollset_set,
- perform_stream_op,
- perform_transport_op,
- destroy_stream,
- destroy_transport,
- get_endpoint};
+const grpc_transport_vtable inproc_vtable = {
+ sizeof(inproc_stream), "inproc",
+ init_stream, nullptr,
+ set_pollset, set_pollset_set,
+ perform_stream_op, perform_transport_op,
+ destroy_stream, destroy_transport,
+ get_endpoint};
//******************************************************************************
// Main inproc transport functions
diff --git a/src/core/lib/channel/connected_channel.cc b/src/core/lib/channel/connected_channel.cc
index fa66027..40582f9 100644
--- a/src/core/lib/channel/connected_channel.cc
+++ b/src/core/lib/channel/connected_channel.cc
@@ -21,16 +21,21 @@
#include "src/core/lib/channel/connected_channel.h"
#include <inttypes.h>
+#include <string.h>
+#include <algorithm>
#include <functional>
#include <initializer_list>
#include <memory>
#include <string>
-#include <type_traits>
#include <utility>
+#include <vector>
+#include "absl/base/thread_annotations.h"
+#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
-#include "absl/status/statusor.h"
+#include "absl/strings/str_cat.h"
+#include "absl/strings/str_join.h"
#include "absl/types/optional.h"
#include "absl/types/variant.h"
@@ -42,48 +47,39 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_fwd.h"
#include "src/core/lib/channel/channel_stack.h"
+#include "src/core/lib/channel/context.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/experiments/experiments.h"
#include "src/core/lib/gpr/alloc.h"
#include "src/core/lib/gprpp/debug_location.h"
+#include "src/core/lib/gprpp/match.h"
#include "src/core/lib/gprpp/orphanable.h"
-#include "src/core/lib/gprpp/ref_counted_ptr.h"
-#include "src/core/lib/gprpp/time.h"
+#include "src/core/lib/gprpp/status_helper.h"
+#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/promise/activity.h"
#include "src/core/lib/promise/arena_promise.h"
#include "src/core/lib/promise/context.h"
-#include "src/core/lib/promise/detail/basic_join.h"
#include "src/core/lib/promise/detail/basic_seq.h"
-#include "src/core/lib/promise/for_each.h"
-#include "src/core/lib/promise/if.h"
-#include "src/core/lib/promise/latch.h"
-#include "src/core/lib/promise/loop.h"
-#include "src/core/lib/promise/map.h"
-#include "src/core/lib/promise/party.h"
#include "src/core/lib/promise/pipe.h"
#include "src/core/lib/promise/poll.h"
-#include "src/core/lib/promise/promise.h"
-#include "src/core/lib/promise/race.h"
-#include "src/core/lib/promise/seq.h"
-#include "src/core/lib/promise/try_join.h"
-#include "src/core/lib/promise/try_seq.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/slice/slice_buffer.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/call_trace.h"
#include "src/core/lib/surface/channel_stack_type.h"
-#include "src/core/lib/transport/batch_builder.h"
-#include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h"
#include "src/core/lib/transport/transport_fwd.h"
#include "src/core/lib/transport/transport_impl.h"
+#define MAX_BUFFER_LENGTH 8192
+
typedef struct connected_channel_channel_data {
grpc_transport* transport;
} channel_data;
@@ -256,24 +252,10 @@
defined(GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_SERVER_CALL)
class ConnectedChannelStream : public Orphanable {
public:
- explicit ConnectedChannelStream(grpc_transport* transport)
- : transport_(transport), stream_(nullptr, StreamDeleter(this)) {
- GRPC_STREAM_REF_INIT(
- &stream_refcount_, 1,
- [](void* p, grpc_error_handle) {
- static_cast<ConnectedChannelStream*>(p)->BeginDestroy();
- },
- this, "ConnectedChannelStream");
- }
-
grpc_transport* transport() { return transport_; }
grpc_closure* stream_destroyed_closure() { return &stream_destroyed_; }
- BatchBuilder::Target batch_target() {
- return BatchBuilder::Target{transport_, stream_.get(), &stream_refcount_};
- }
-
- void IncrementRefCount(const char* reason = "smartptr") {
+ void IncrementRefCount(const char* reason) {
#ifndef NDEBUG
grpc_stream_ref(&stream_refcount_, reason);
#else
@@ -282,7 +264,7 @@
#endif
}
- void Unref(const char* reason = "smartptr") {
+ void Unref(const char* reason) {
#ifndef NDEBUG
grpc_stream_unref(&stream_refcount_, reason);
#else
@@ -291,48 +273,235 @@
#endif
}
- RefCountedPtr<ConnectedChannelStream> InternalRef() {
- IncrementRefCount("smartptr");
- return RefCountedPtr<ConnectedChannelStream>(this);
- }
-
void Orphan() final {
- bool finished = finished_.IsSet();
- if (grpc_call_trace.enabled()) {
- gpr_log(GPR_DEBUG, "%s[connected] Orphan stream, finished: %d",
- party_->DebugTag().c_str(), finished);
+ bool finished;
+ {
+ MutexLock lock(mu());
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO, "%s[connected] DropStream: %s finished=%s",
+ Activity::current()->DebugTag().c_str(),
+ ActiveOpsString().c_str(), finished_ ? "true" : "false");
+ }
+ finished = finished_;
}
// If we hadn't already observed the stream to be finished, we need to
// cancel it at the transport.
if (!finished) {
- party_->Spawn(
- "finish",
- [self = InternalRef()]() {
- if (!self->finished_.IsSet()) {
- self->finished_.Set();
- }
- return Empty{};
- },
- [](Empty) {});
- GetContext<BatchBuilder>()->Cancel(batch_target(),
- absl::CancelledError());
+ IncrementRefCount("shutdown client stream");
+ auto* cancel_op =
+ GetContext<Arena>()->New<grpc_transport_stream_op_batch>();
+ cancel_op->cancel_stream = true;
+ cancel_op->payload = batch_payload();
+ auto* s = stream();
+ cancel_op->on_complete = NewClosure(
+ [this](grpc_error_handle) { Unref("shutdown client stream"); });
+ batch_payload()->cancel_stream.cancel_error = absl::CancelledError();
+ grpc_transport_perform_stream_op(transport(), s, cancel_op);
}
- Unref("orphan connected stream");
+ Unref("orphan client stream");
}
- // Returns a promise that implements the receive message loop.
- auto RecvMessages(PipeSender<MessageHandle>* incoming_messages);
- // Returns a promise that implements the send message loop.
- auto SendMessages(PipeReceiver<MessageHandle>* outgoing_messages);
+ protected:
+ explicit ConnectedChannelStream(grpc_transport* transport)
+ : transport_(transport), stream_(nullptr, StreamDeleter(this)) {
+ call_context_->IncrementRefCount("connected_channel_stream");
+ GRPC_STREAM_REF_INIT(
+ &stream_refcount_, 1,
+ [](void* p, grpc_error_handle) {
+ static_cast<ConnectedChannelStream*>(p)->BeginDestroy();
+ },
+ this, "client_stream");
+ }
- void SetStream(grpc_stream* stream) { stream_.reset(stream); }
grpc_stream* stream() { return stream_.get(); }
+ void SetStream(grpc_stream* stream) { stream_.reset(stream); }
grpc_stream_refcount* stream_refcount() { return &stream_refcount_; }
+ Mutex* mu() const ABSL_LOCK_RETURNED(mu_) { return &mu_; }
+ grpc_transport_stream_op_batch_payload* batch_payload() {
+ return &batch_payload_;
+ }
+ bool finished() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) { return finished_; }
+ void set_finished() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) { finished_ = true; }
+ virtual std::string ActiveOpsString() const
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) = 0;
- void set_finished() { finished_.Set(); }
- auto WaitFinished() { return finished_.Wait(); }
+ void SchedulePush(grpc_transport_stream_op_batch* batch)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+ batch->is_traced = GetContext<CallContext>()->traced();
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "%s[connected] Push batch to transport: %s",
+ Activity::current()->DebugTag().c_str(),
+ grpc_transport_stream_op_batch_string(batch, false).c_str());
+ }
+ if (push_batches_.empty()) {
+ IncrementRefCount("push");
+ ExecCtx::Run(DEBUG_LOCATION, &push_, absl::OkStatus());
+ }
+ push_batches_.push_back(batch);
+ }
+
+ void PollSendMessage(PipeReceiver<MessageHandle>* outgoing_messages,
+ ClientMetadataHandle* client_trailing_metadata)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+ if (absl::holds_alternative<Closed>(send_message_state_)) {
+ message_to_send_.reset();
+ }
+ if (absl::holds_alternative<Idle>(send_message_state_)) {
+ message_to_send_.reset();
+ send_message_state_.emplace<PipeReceiverNextType<MessageHandle>>(
+ outgoing_messages->Next());
+ }
+ if (auto* next = absl::get_if<PipeReceiverNextType<MessageHandle>>(
+ &send_message_state_)) {
+ auto r = (*next)();
+ if (auto* p = r.value_if_ready()) {
+ memset(&send_message_, 0, sizeof(send_message_));
+ send_message_.payload = batch_payload();
+ send_message_.on_complete = &send_message_batch_done_;
+ // No value => half close from above.
+ if (p->has_value()) {
+ message_to_send_ = std::move(*p);
+ send_message_state_ = SendMessageToTransport{};
+ send_message_.send_message = true;
+ batch_payload()->send_message.send_message =
+ (*message_to_send_)->payload();
+ batch_payload()->send_message.flags = (*message_to_send_)->flags();
+ } else {
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO, "%s[connected] PollConnectedChannel: half close",
+ Activity::current()->DebugTag().c_str());
+ }
+ GPR_ASSERT(!absl::holds_alternative<Closed>(send_message_state_));
+ send_message_state_ = Closed{};
+ send_message_.send_trailing_metadata = true;
+ if (client_trailing_metadata != nullptr) {
+ *client_trailing_metadata =
+ GetContext<Arena>()->MakePooled<ClientMetadata>(
+ GetContext<Arena>());
+ batch_payload()->send_trailing_metadata.send_trailing_metadata =
+ client_trailing_metadata->get();
+ batch_payload()->send_trailing_metadata.sent = nullptr;
+ } else {
+ return; // Skip rest of function for server
+ }
+ }
+ IncrementRefCount("send_message");
+ send_message_waker_ = Activity::current()->MakeOwningWaker();
+ SchedulePush(&send_message_);
+ }
+ }
+ }
+
+ void PollRecvMessage(PipeSender<MessageHandle>*& incoming_messages)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+ if (auto* pending =
+ absl::get_if<PendingReceiveMessage>(&recv_message_state_)) {
+ if (pending->received) {
+ if (pending->payload.has_value()) {
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "%s[connected] PollRecvMessage: received payload of "
+ "%" PRIdPTR " bytes",
+ recv_message_waker_.ActivityDebugTag().c_str(),
+ pending->payload->Length());
+ }
+ recv_message_state_ =
+ incoming_messages->Push(GetContext<Arena>()->MakePooled<Message>(
+ std::move(*pending->payload), pending->flags));
+ } else {
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "%s[connected] PollRecvMessage: received no payload",
+ recv_message_waker_.ActivityDebugTag().c_str());
+ }
+ recv_message_state_ = Closed{};
+ std::exchange(incoming_messages, nullptr)->Close();
+ }
+ }
+ }
+ if (absl::holds_alternative<Idle>(recv_message_state_)) {
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO, "%s[connected] PollRecvMessage: requesting message",
+ Activity::current()->DebugTag().c_str());
+ }
+ PushRecvMessage();
+ }
+ if (auto* push = absl::get_if<PipeSender<MessageHandle>::PushType>(
+ &recv_message_state_)) {
+ auto r = (*push)();
+ if (bool* result = r.value_if_ready()) {
+ if (*result) {
+ if (!finished_) {
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "%s[connected] PollRecvMessage: pushed message; "
+ "requesting next",
+ Activity::current()->DebugTag().c_str());
+ }
+ PushRecvMessage();
+ } else {
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "%s[connected] PollRecvMessage: pushed message "
+ "and finished; "
+ "marking closed",
+ Activity::current()->DebugTag().c_str());
+ }
+ recv_message_state_ = Closed{};
+ std::exchange(incoming_messages, nullptr)->Close();
+ }
+ } else {
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "%s[connected] PollRecvMessage: failed to push "
+ "message; marking "
+ "closed",
+ Activity::current()->DebugTag().c_str());
+ }
+ recv_message_state_ = Closed{};
+ std::exchange(incoming_messages, nullptr)->Close();
+ }
+ }
+ }
+ }
+
+ std::string SendMessageString() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu()) {
+ return Match(
+ send_message_state_, [](Idle) -> std::string { return "IDLE"; },
+ [](Closed) -> std::string { return "CLOSED"; },
+ [](const PipeReceiverNextType<MessageHandle>&) -> std::string {
+ return "WAITING";
+ },
+ [](SendMessageToTransport) -> std::string { return "SENDING"; });
+ }
+
+ std::string RecvMessageString() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu()) {
+ return Match(
+ recv_message_state_, [](Idle) -> std::string { return "IDLE"; },
+ [](Closed) -> std::string { return "CLOSED"; },
+ [](const PendingReceiveMessage&) -> std::string { return "WAITING"; },
+ [](const absl::optional<MessageHandle>& message) -> std::string {
+ return absl::StrCat(
+ "READY:", message.has_value()
+ ? absl::StrCat((*message)->payload()->Length(), "b")
+ : "EOS");
+ },
+ [](const PipeSender<MessageHandle>::PushType&) -> std::string {
+ return "PUSHING";
+ });
+ }
+
+ bool IsPromiseReceiving() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu()) {
+ return absl::holds_alternative<PipeSender<MessageHandle>::PushType>(
+ recv_message_state_) ||
+ absl::holds_alternative<PendingReceiveMessage>(recv_message_state_);
+ }
private:
+ struct SendMessageToTransport {};
+ struct Idle {};
+ struct Closed {};
+
class StreamDeleter {
public:
explicit StreamDeleter(ConnectedChannelStream* impl) : impl_(impl) {}
@@ -348,7 +517,11 @@
using StreamPtr = std::unique_ptr<grpc_stream, StreamDeleter>;
void StreamDestroyed() {
- call_context_->RunInContext([this] { this->~ConnectedChannelStream(); });
+ call_context_->RunInContext([this] {
+ auto* cc = call_context_;
+ this->~ConnectedChannelStream();
+ cc->Unref("child_stream");
+ });
}
void BeginDestroy() {
@@ -359,434 +532,824 @@
}
}
+ // Called from outside the activity to push work down to the transport.
+ void Push() {
+ PushBatches push_batches;
+ {
+ MutexLock lock(&mu_);
+ push_batches.swap(push_batches_);
+ }
+ for (auto* batch : push_batches) {
+ if (stream() != nullptr) {
+ grpc_transport_perform_stream_op(transport(), stream(), batch);
+ } else {
+ grpc_transport_stream_op_batch_finish_with_failure_from_transport(
+ batch, absl::CancelledError());
+ }
+ }
+ Unref("push");
+ }
+
+ void SendMessageBatchDone(grpc_error_handle error) {
+ {
+ MutexLock lock(&mu_);
+ if (error != absl::OkStatus()) {
+ // Note that we're in error here, the call will be closed by the
+ // transport in a moment, and we'll return from the promise with an
+ // error - so we don't need to do any extra work to close out pipes or
+ // the like.
+ send_message_state_ = Closed{};
+ }
+ if (!absl::holds_alternative<Closed>(send_message_state_)) {
+ send_message_state_ = Idle{};
+ }
+ send_message_waker_.Wakeup();
+ }
+ Unref("send_message");
+ }
+
+ void RecvMessageBatchDone(grpc_error_handle error) {
+ {
+ MutexLock lock(mu());
+ if (error != absl::OkStatus()) {
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO, "%s[connected] RecvMessageBatchDone: error=%s",
+ recv_message_waker_.ActivityDebugTag().c_str(),
+ StatusToString(error).c_str());
+ }
+ } else if (absl::holds_alternative<Closed>(recv_message_state_)) {
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "%s[connected] RecvMessageBatchDone: already closed, "
+ "ignoring",
+ recv_message_waker_.ActivityDebugTag().c_str());
+ }
+ } else {
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "%s[connected] RecvMessageBatchDone: received message",
+ recv_message_waker_.ActivityDebugTag().c_str());
+ }
+ auto pending =
+ absl::get_if<PendingReceiveMessage>(&recv_message_state_);
+ GPR_ASSERT(pending != nullptr);
+ GPR_ASSERT(pending->received == false);
+ pending->received = true;
+ }
+ recv_message_waker_.Wakeup();
+ }
+ Unref("recv_message");
+ }
+
+ void PushRecvMessage() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+ recv_message_state_ = PendingReceiveMessage{};
+ auto& pending_recv_message =
+ absl::get<PendingReceiveMessage>(recv_message_state_);
+ memset(&recv_message_, 0, sizeof(recv_message_));
+ recv_message_.payload = batch_payload();
+ recv_message_.on_complete = nullptr;
+ recv_message_.recv_message = true;
+ batch_payload()->recv_message.recv_message = &pending_recv_message.payload;
+ batch_payload()->recv_message.flags = &pending_recv_message.flags;
+ batch_payload()->recv_message.call_failed_before_recv_message = nullptr;
+ batch_payload()->recv_message.recv_message_ready =
+ &recv_message_batch_done_;
+ IncrementRefCount("recv_message");
+ recv_message_waker_ = Activity::current()->MakeOwningWaker();
+ SchedulePush(&recv_message_);
+ }
+
+ mutable Mutex mu_;
grpc_transport* const transport_;
- RefCountedPtr<CallContext> const call_context_{
- GetContext<CallContext>()->Ref()};
+ CallContext* const call_context_{GetContext<CallContext>()};
grpc_closure stream_destroyed_ =
MakeMemberClosure<ConnectedChannelStream,
&ConnectedChannelStream::StreamDestroyed>(
this, DEBUG_LOCATION);
grpc_stream_refcount stream_refcount_;
StreamPtr stream_;
- Arena* arena_ = GetContext<Arena>();
- Party* const party_ = static_cast<Party*>(Activity::current());
- ExternallyObservableLatch<void> finished_;
+ using PushBatches = absl::InlinedVector<grpc_transport_stream_op_batch*, 3>;
+ PushBatches push_batches_ ABSL_GUARDED_BY(mu_);
+ grpc_closure push_ =
+ MakeMemberClosure<ConnectedChannelStream, &ConnectedChannelStream::Push>(
+ this, DEBUG_LOCATION);
+
+ NextResult<MessageHandle> message_to_send_ ABSL_GUARDED_BY(mu_);
+ absl::variant<Idle, Closed, PipeReceiverNextType<MessageHandle>,
+ SendMessageToTransport>
+ send_message_state_ ABSL_GUARDED_BY(mu_);
+ grpc_transport_stream_op_batch send_message_;
+ grpc_closure send_message_batch_done_ =
+ MakeMemberClosure<ConnectedChannelStream,
+ &ConnectedChannelStream::SendMessageBatchDone>(
+ this, DEBUG_LOCATION);
+
+ struct PendingReceiveMessage {
+ absl::optional<SliceBuffer> payload;
+ uint32_t flags;
+ bool received = false;
+ };
+ absl::variant<Idle, PendingReceiveMessage, Closed,
+ PipeSender<MessageHandle>::PushType>
+ recv_message_state_ ABSL_GUARDED_BY(mu_);
+ grpc_closure recv_message_batch_done_ =
+ MakeMemberClosure<ConnectedChannelStream,
+ &ConnectedChannelStream::RecvMessageBatchDone>(
+ this, DEBUG_LOCATION);
+ grpc_transport_stream_op_batch recv_message_;
+
+ Waker send_message_waker_ ABSL_GUARDED_BY(mu_);
+ Waker recv_message_waker_ ABSL_GUARDED_BY(mu_);
+ bool finished_ ABSL_GUARDED_BY(mu_) = false;
+
+ grpc_transport_stream_op_batch_payload batch_payload_{
+ GetContext<grpc_call_context_element>()};
};
-
-auto ConnectedChannelStream::RecvMessages(
- PipeSender<MessageHandle>* incoming_messages) {
- return Loop([self = InternalRef(),
- incoming_messages = std::move(*incoming_messages)]() mutable {
- return Seq(
- GetContext<BatchBuilder>()->ReceiveMessage(self->batch_target()),
- [&incoming_messages](
- absl::StatusOr<absl::optional<MessageHandle>> status) mutable {
- bool has_message = status.ok() && status->has_value();
- auto publish_message = [&incoming_messages, &status]() {
- auto pending_message = std::move(**status);
- if (grpc_call_trace.enabled()) {
- gpr_log(GPR_INFO,
- "%s[connected] RecvMessage: received payload of %" PRIdPTR
- " bytes",
- Activity::current()->DebugTag().c_str(),
- pending_message->payload()->Length());
- }
- return Map(incoming_messages.Push(std::move(pending_message)),
- [](bool ok) -> LoopCtl<absl::Status> {
- if (!ok) {
- if (grpc_call_trace.enabled()) {
- gpr_log(GPR_INFO,
- "%s[connected] RecvMessage: failed to "
- "push message towards the application",
- Activity::current()->DebugTag().c_str());
- }
- return absl::OkStatus();
- }
- return Continue{};
- });
- };
- auto publish_close = [&status]() mutable {
- if (grpc_call_trace.enabled()) {
- gpr_log(GPR_INFO,
- "%s[connected] RecvMessage: reached end of stream with "
- "status:%s",
- Activity::current()->DebugTag().c_str(),
- status.status().ToString().c_str());
- }
- return Immediate(LoopCtl<absl::Status>(status.status()));
- };
- return If(has_message, std::move(publish_message),
- std::move(publish_close));
- });
- });
-}
-
-auto ConnectedChannelStream::SendMessages(
- PipeReceiver<MessageHandle>* outgoing_messages) {
- return ForEach(std::move(*outgoing_messages),
- [self = InternalRef()](MessageHandle message) {
- return GetContext<BatchBuilder>()->SendMessage(
- self->batch_target(), std::move(message));
- });
-}
-#endif // defined(GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_CLIENT_CALL) ||
- // defined(GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_SERVER_CALL)
+#endif
#ifdef GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_CLIENT_CALL
-ArenaPromise<ServerMetadataHandle> MakeClientCallPromise(
- grpc_transport* transport, CallArgs call_args, NextPromiseFactory) {
- OrphanablePtr<ConnectedChannelStream> stream(
- GetContext<Arena>()->New<ConnectedChannelStream>(transport));
- stream->SetStream(static_cast<grpc_stream*>(
- GetContext<Arena>()->Alloc(transport->vtable->sizeof_stream)));
- grpc_transport_init_stream(transport, stream->stream(),
- stream->stream_refcount(), nullptr,
- GetContext<Arena>());
- grpc_transport_set_pops(transport, stream->stream(),
- GetContext<CallContext>()->polling_entity());
- auto* party = static_cast<Party*>(Activity::current());
- // Start a loop to send messages from client_to_server_messages to the
- // transport. When the pipe closes and the loop completes, send a trailing
- // metadata batch to close the stream.
- party->Spawn(
- "send_messages",
- TrySeq(stream->SendMessages(call_args.client_to_server_messages),
- [stream = stream->InternalRef()]() {
- return GetContext<BatchBuilder>()->SendClientTrailingMetadata(
- stream->batch_target());
- }),
- [](absl::Status) {});
- // Start a promise to receive server initial metadata and then forward it up
- // through the receiving pipe.
- auto server_initial_metadata =
- GetContext<Arena>()->MakePooled<ServerMetadata>(GetContext<Arena>());
- party->Spawn(
- "recv_initial_metadata",
- TrySeq(GetContext<BatchBuilder>()->ReceiveServerInitialMetadata(
- stream->batch_target()),
- [pipe = call_args.server_initial_metadata](
- ServerMetadataHandle server_initial_metadata) {
- if (grpc_call_trace.enabled()) {
- gpr_log(GPR_DEBUG,
- "%s[connected] Publish client initial metadata: %s",
- Activity::current()->DebugTag().c_str(),
- server_initial_metadata->DebugString().c_str());
- }
- return Map(pipe->Push(std::move(server_initial_metadata)),
- [](bool r) {
- if (r) return absl::OkStatus();
- return absl::CancelledError();
- });
- }),
- [](absl::Status) {});
+class ClientStream : public ConnectedChannelStream {
+ public:
+ ClientStream(grpc_transport* transport, CallArgs call_args)
+ : ConnectedChannelStream(transport),
+ server_initial_metadata_pipe_(call_args.server_initial_metadata),
+ client_to_server_messages_(call_args.client_to_server_messages),
+ server_to_client_messages_(call_args.server_to_client_messages),
+ client_initial_metadata_(std::move(call_args.client_initial_metadata)) {
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO, "%s[connected] InitImpl: intitial_metadata=%s",
+ Activity::current()->DebugTag().c_str(),
+ client_initial_metadata_->DebugString().c_str());
+ }
+ }
- // Build up the rest of the main call promise:
+ Poll<ServerMetadataHandle> PollOnce() {
+ MutexLock lock(mu());
+ GPR_ASSERT(!finished());
- // Create a promise that will send initial metadata and then signal completion
- // of that via the token.
- auto send_initial_metadata = Seq(
- GetContext<BatchBuilder>()->SendClientInitialMetadata(
- stream->batch_target(), std::move(call_args.client_initial_metadata)),
- [sent_initial_metadata_token =
- std::move(call_args.client_initial_metadata_outstanding)](
- absl::Status status) mutable {
- sent_initial_metadata_token.Complete(status.ok());
- return status;
- });
- // Create a promise that will receive server trailing metadata.
- // If this fails, we massage the error into metadata that we can report
- // upwards.
- auto server_trailing_metadata =
- GetContext<Arena>()->MakePooled<ServerMetadata>(GetContext<Arena>());
- auto recv_trailing_metadata =
- Map(GetContext<BatchBuilder>()->ReceiveServerTrailingMetadata(
- stream->batch_target()),
- [](absl::StatusOr<ServerMetadataHandle> status) mutable {
- if (!status.ok()) {
- auto server_trailing_metadata =
- GetContext<Arena>()->MakePooled<ServerMetadata>(
- GetContext<Arena>());
- grpc_status_code status_code = GRPC_STATUS_UNKNOWN;
- std::string message;
- grpc_error_get_status(status.status(), Timestamp::InfFuture(),
- &status_code, &message, nullptr, nullptr);
- server_trailing_metadata->Set(GrpcStatusMetadata(), status_code);
- server_trailing_metadata->Set(GrpcMessageMetadata(),
- Slice::FromCopiedString(message));
- return server_trailing_metadata;
- } else {
- return std::move(*status);
- }
- });
- // Finally the main call promise.
- // Concurrently: send initial metadata and receive messages, until BOTH
- // complete (or one fails).
- // Next: receive trailing metadata, and return that up the stack.
- auto recv_messages =
- stream->RecvMessages(call_args.server_to_client_messages);
- return Map(TrySeq(TryJoin(std::move(send_initial_metadata),
- std::move(recv_messages)),
- std::move(recv_trailing_metadata)),
- [stream = std::move(stream)](ServerMetadataHandle result) {
- stream->set_finished();
- return result;
- });
-}
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO, "%s[connected] PollConnectedChannel: %s",
+ Activity::current()->DebugTag().c_str(),
+ ActiveOpsString().c_str());
+ }
+
+ if (!std::exchange(requested_metadata_, true)) {
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "%s[connected] PollConnectedChannel: requesting metadata",
+ Activity::current()->DebugTag().c_str());
+ }
+ SetStream(static_cast<grpc_stream*>(
+ GetContext<Arena>()->Alloc(transport()->vtable->sizeof_stream)));
+ grpc_transport_init_stream(transport(), stream(), stream_refcount(),
+ nullptr, GetContext<Arena>());
+ grpc_transport_set_pops(transport(), stream(),
+ GetContext<CallContext>()->polling_entity());
+ memset(&metadata_, 0, sizeof(metadata_));
+ metadata_.send_initial_metadata = true;
+ metadata_.recv_initial_metadata = true;
+ metadata_.recv_trailing_metadata = true;
+ metadata_.payload = batch_payload();
+ metadata_.on_complete = &metadata_batch_done_;
+ batch_payload()->send_initial_metadata.send_initial_metadata =
+ client_initial_metadata_.get();
+ server_initial_metadata_ =
+ GetContext<Arena>()->MakePooled<ServerMetadata>(GetContext<Arena>());
+ batch_payload()->recv_initial_metadata.recv_initial_metadata =
+ server_initial_metadata_.get();
+ batch_payload()->recv_initial_metadata.recv_initial_metadata_ready =
+ &recv_initial_metadata_ready_;
+ batch_payload()->recv_initial_metadata.trailing_metadata_available =
+ nullptr;
+ server_trailing_metadata_ =
+ GetContext<Arena>()->MakePooled<ServerMetadata>(GetContext<Arena>());
+ batch_payload()->recv_trailing_metadata.recv_trailing_metadata =
+ server_trailing_metadata_.get();
+ batch_payload()->recv_trailing_metadata.collect_stats =
+ &GetContext<CallContext>()->call_stats()->transport_stream_stats;
+ batch_payload()->recv_trailing_metadata.recv_trailing_metadata_ready =
+ &recv_trailing_metadata_ready_;
+ IncrementRefCount("metadata_batch_done");
+ IncrementRefCount("initial_metadata_ready");
+ IncrementRefCount("trailing_metadata_ready");
+ initial_metadata_waker_ = Activity::current()->MakeOwningWaker();
+ trailing_metadata_waker_ = Activity::current()->MakeOwningWaker();
+ SchedulePush(&metadata_);
+ }
+ if (server_initial_metadata_state_ ==
+ ServerInitialMetadataState::kReceivedButNotPushed) {
+ server_initial_metadata_state_ = ServerInitialMetadataState::kPushing;
+ server_initial_metadata_push_promise_ =
+ server_initial_metadata_pipe_->Push(
+ std::move(server_initial_metadata_));
+ }
+ if (server_initial_metadata_state_ ==
+ ServerInitialMetadataState::kPushing) {
+ auto r = (*server_initial_metadata_push_promise_)();
+ if (r.ready()) {
+ server_initial_metadata_state_ = ServerInitialMetadataState::kPushed;
+ server_initial_metadata_push_promise_.reset();
+ }
+ }
+ PollSendMessage(client_to_server_messages_, &client_trailing_metadata_);
+ PollRecvMessage(server_to_client_messages_);
+ if (server_initial_metadata_state_ == ServerInitialMetadataState::kPushed &&
+ !IsPromiseReceiving() &&
+ std::exchange(queued_trailing_metadata_, false)) {
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "%s[connected] PollConnectedChannel: finished request, "
+ "returning: {%s}; "
+ "active_ops: %s",
+ Activity::current()->DebugTag().c_str(),
+ server_trailing_metadata_->DebugString().c_str(),
+ ActiveOpsString().c_str());
+ }
+ set_finished();
+ return ServerMetadataHandle(std::move(server_trailing_metadata_));
+ }
+ return Pending{};
+ }
+
+ void RecvInitialMetadataReady(grpc_error_handle error) {
+ GPR_ASSERT(error == absl::OkStatus());
+ {
+ MutexLock lock(mu());
+ server_initial_metadata_state_ =
+ ServerInitialMetadataState::kReceivedButNotPushed;
+ initial_metadata_waker_.Wakeup();
+ }
+ Unref("initial_metadata_ready");
+ }
+
+ void RecvTrailingMetadataReady(grpc_error_handle error) {
+ GPR_ASSERT(error == absl::OkStatus());
+ {
+ MutexLock lock(mu());
+ queued_trailing_metadata_ = true;
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "%s[connected] RecvTrailingMetadataReady: "
+ "queued_trailing_metadata_ "
+ "set to true; active_ops: %s",
+ trailing_metadata_waker_.ActivityDebugTag().c_str(),
+ ActiveOpsString().c_str());
+ }
+ trailing_metadata_waker_.Wakeup();
+ }
+ Unref("trailing_metadata_ready");
+ }
+
+ void MetadataBatchDone(grpc_error_handle error) {
+ GPR_ASSERT(error == absl::OkStatus());
+ Unref("metadata_batch_done");
+ }
+
+ private:
+ enum class ServerInitialMetadataState : uint8_t {
+ // Initial metadata has not been received from the server.
+ kNotReceived,
+ // Initial metadata has been received from the server via the transport, but
+ // has not yet been pushed onto the pipe to publish it up the call stack.
+ kReceivedButNotPushed,
+ // Initial metadata has been received from the server via the transport and
+ // has been pushed on the pipe to publish it up the call stack.
+ // It's still in the pipe and has not been removed by the call at the top
+ // yet.
+ kPushing,
+ // Initial metadata has been received from the server via the transport and
+ // has been pushed on the pipe to publish it up the call stack AND removed
+ // by the call at the top.
+ kPushed,
+ };
+
+ std::string ActiveOpsString() const override
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu()) {
+ std::vector<std::string> ops;
+ if (finished()) ops.push_back("FINISHED");
+ // Outstanding Operations on Transport
+ std::vector<std::string> waiting;
+ if (initial_metadata_waker_ != Waker()) {
+ waiting.push_back("initial_metadata");
+ }
+ if (trailing_metadata_waker_ != Waker()) {
+ waiting.push_back("trailing_metadata");
+ }
+ if (!waiting.empty()) {
+ ops.push_back(absl::StrCat("waiting:", absl::StrJoin(waiting, ",")));
+ }
+ // Results from transport
+ std::vector<std::string> queued;
+ if (server_initial_metadata_state_ ==
+ ServerInitialMetadataState::kReceivedButNotPushed) {
+ queued.push_back("initial_metadata");
+ }
+ if (queued_trailing_metadata_) queued.push_back("trailing_metadata");
+ if (!queued.empty()) {
+ ops.push_back(absl::StrCat("queued:", absl::StrJoin(queued, ",")));
+ }
+ // Send message
+ std::string send_message_state = SendMessageString();
+ if (send_message_state != "WAITING") {
+ ops.push_back(absl::StrCat("send_message:", send_message_state));
+ }
+ // Receive message
+ std::string recv_message_state = RecvMessageString();
+ if (recv_message_state != "IDLE") {
+ ops.push_back(absl::StrCat("recv_message:", recv_message_state));
+ }
+ return absl::StrJoin(ops, " ");
+ }
+
+ bool requested_metadata_ = false;
+ ServerInitialMetadataState server_initial_metadata_state_
+ ABSL_GUARDED_BY(mu()) = ServerInitialMetadataState::kNotReceived;
+ bool queued_trailing_metadata_ ABSL_GUARDED_BY(mu()) = false;
+ Waker initial_metadata_waker_ ABSL_GUARDED_BY(mu());
+ Waker trailing_metadata_waker_ ABSL_GUARDED_BY(mu());
+ PipeSender<ServerMetadataHandle>* server_initial_metadata_pipe_;
+ PipeReceiver<MessageHandle>* client_to_server_messages_;
+ PipeSender<MessageHandle>* server_to_client_messages_;
+ grpc_closure recv_initial_metadata_ready_ =
+ MakeMemberClosure<ClientStream, &ClientStream::RecvInitialMetadataReady>(
+ this, DEBUG_LOCATION);
+ grpc_closure recv_trailing_metadata_ready_ =
+ MakeMemberClosure<ClientStream, &ClientStream::RecvTrailingMetadataReady>(
+ this, DEBUG_LOCATION);
+ ClientMetadataHandle client_initial_metadata_;
+ ClientMetadataHandle client_trailing_metadata_;
+ ServerMetadataHandle server_initial_metadata_;
+ ServerMetadataHandle server_trailing_metadata_;
+ absl::optional<PipeSender<ServerMetadataHandle>::PushType>
+ server_initial_metadata_push_promise_;
+ grpc_transport_stream_op_batch metadata_;
+ grpc_closure metadata_batch_done_ =
+ MakeMemberClosure<ClientStream, &ClientStream::MetadataBatchDone>(
+ this, DEBUG_LOCATION);
+};
+
+class ClientConnectedCallPromise {
+ public:
+ ClientConnectedCallPromise(grpc_transport* transport, CallArgs call_args)
+ : impl_(GetContext<Arena>()->New<ClientStream>(transport,
+ std::move(call_args))) {}
+
+ ClientConnectedCallPromise(const ClientConnectedCallPromise&) = delete;
+ ClientConnectedCallPromise& operator=(const ClientConnectedCallPromise&) =
+ delete;
+ ClientConnectedCallPromise(ClientConnectedCallPromise&& other) noexcept
+ : impl_(std::exchange(other.impl_, nullptr)) {}
+ ClientConnectedCallPromise& operator=(
+ ClientConnectedCallPromise&& other) noexcept {
+ impl_ = std::move(other.impl_);
+ return *this;
+ }
+
+ static ArenaPromise<ServerMetadataHandle> Make(grpc_transport* transport,
+ CallArgs call_args,
+ NextPromiseFactory) {
+ return ClientConnectedCallPromise(transport, std::move(call_args));
+ }
+
+ Poll<ServerMetadataHandle> operator()() { return impl_->PollOnce(); }
+
+ private:
+ OrphanablePtr<ClientStream> impl_;
+};
#endif
#ifdef GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_SERVER_CALL
-ArenaPromise<ServerMetadataHandle> MakeServerCallPromise(
- grpc_transport* transport, CallArgs,
- NextPromiseFactory next_promise_factory) {
- OrphanablePtr<ConnectedChannelStream> stream(
- GetContext<Arena>()->New<ConnectedChannelStream>(transport));
+class ServerStream final : public ConnectedChannelStream {
+ public:
+ ServerStream(grpc_transport* transport,
+ NextPromiseFactory next_promise_factory)
+ : ConnectedChannelStream(transport) {
+ SetStream(static_cast<grpc_stream*>(
+ GetContext<Arena>()->Alloc(transport->vtable->sizeof_stream)));
+ grpc_transport_init_stream(
+ transport, stream(), stream_refcount(),
+ GetContext<CallContext>()->server_call_context()->server_stream_data(),
+ GetContext<Arena>());
+ grpc_transport_set_pops(transport, stream(),
+ GetContext<CallContext>()->polling_entity());
- stream->SetStream(static_cast<grpc_stream*>(
- GetContext<Arena>()->Alloc(transport->vtable->sizeof_stream)));
- grpc_transport_init_stream(
- transport, stream->stream(), stream->stream_refcount(),
- GetContext<CallContext>()->server_call_context()->server_stream_data(),
- GetContext<Arena>());
- grpc_transport_set_pops(transport, stream->stream(),
- GetContext<CallContext>()->polling_entity());
+ // Fetch initial metadata
+ auto& gim = call_state_.emplace<GettingInitialMetadata>(this);
+ gim.recv_initial_metadata_ready_waker =
+ Activity::current()->MakeOwningWaker();
+ memset(&gim.recv_initial_metadata, 0, sizeof(gim.recv_initial_metadata));
+ gim.recv_initial_metadata.payload = batch_payload();
+ gim.recv_initial_metadata.on_complete = nullptr;
+ gim.recv_initial_metadata.recv_initial_metadata = true;
+ gim.next_promise_factory = std::move(next_promise_factory);
+ batch_payload()->recv_initial_metadata.recv_initial_metadata =
+ gim.client_initial_metadata.get();
+ batch_payload()->recv_initial_metadata.recv_initial_metadata_ready =
+ &gim.recv_initial_metadata_ready;
+ SchedulePush(&gim.recv_initial_metadata);
- auto* party = static_cast<Party*>(Activity::current());
+ // Fetch trailing metadata (to catch cancellations)
+ auto& gtm =
+ client_trailing_metadata_state_.emplace<WaitingForTrailingMetadata>();
+ gtm.recv_trailing_metadata_ready =
+ MakeMemberClosure<ServerStream,
+ &ServerStream::RecvTrailingMetadataReady>(this);
+ memset(&gtm.recv_trailing_metadata, 0, sizeof(gtm.recv_trailing_metadata));
+ gtm.recv_trailing_metadata.payload = batch_payload();
+ gtm.recv_trailing_metadata.recv_trailing_metadata = true;
+ batch_payload()->recv_trailing_metadata.recv_trailing_metadata =
+ gtm.result.get();
+ batch_payload()->recv_trailing_metadata.collect_stats =
+ &GetContext<CallContext>()->call_stats()->transport_stream_stats;
+ batch_payload()->recv_trailing_metadata.recv_trailing_metadata_ready =
+ &gtm.recv_trailing_metadata_ready;
+ SchedulePush(&gtm.recv_trailing_metadata);
+ gtm.waker = Activity::current()->MakeOwningWaker();
+ }
- // Arifacts we need for the lifetime of the call.
- struct CallData {
+ Poll<ServerMetadataHandle> PollOnce() {
+ MutexLock lock(mu());
+
+ auto poll_send_initial_metadata = [this]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(
+ mu()) {
+ if (auto* promise =
+ absl::get_if<PipeReceiverNextType<ServerMetadataHandle>>(
+ &server_initial_metadata_)) {
+ auto r = (*promise)();
+ if (auto* md = r.value_if_ready()) {
+ if (grpc_call_trace.enabled()) {
+ gpr_log(
+ GPR_INFO, "%s[connected] got initial metadata %s",
+ Activity::current()->DebugTag().c_str(),
+ (md->has_value() ? (**md)->DebugString() : "<trailers-only>")
+ .c_str());
+ }
+ memset(&send_initial_metadata_, 0, sizeof(send_initial_metadata_));
+ send_initial_metadata_.send_initial_metadata = true;
+ send_initial_metadata_.payload = batch_payload();
+ send_initial_metadata_.on_complete = &send_initial_metadata_done_;
+ batch_payload()->send_initial_metadata.send_initial_metadata =
+ server_initial_metadata_
+ .emplace<ServerMetadataHandle>(std::move(**md))
+ .get();
+ SchedulePush(&send_initial_metadata_);
+ return true;
+ } else {
+ return false;
+ }
+ } else {
+ return true;
+ }
+ };
+
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO, "%s[connected] PollConnectedChannel: %s",
+ Activity::current()->DebugTag().c_str(),
+ ActiveOpsString().c_str());
+ }
+
+ poll_send_initial_metadata();
+
+ if (auto* p = absl::get_if<GotClientHalfClose>(
+ &client_trailing_metadata_state_)) {
+ pipes_.client_to_server.sender.Close();
+ if (!p->result.ok()) {
+ // client cancelled, we should cancel too
+ if (absl::holds_alternative<absl::monostate>(call_state_) ||
+ absl::holds_alternative<GotInitialMetadata>(call_state_) ||
+ absl::holds_alternative<MessageLoop>(call_state_)) {
+ if (!absl::holds_alternative<ServerMetadataHandle>(
+ server_initial_metadata_)) {
+ // pretend we've sent initial metadata to stop that op from
+ // progressing if it's stuck somewhere above us in the stack
+ server_initial_metadata_.emplace<ServerMetadataHandle>();
+ }
+ // cancel the call - this status will be returned to the server bottom
+ // promise
+ call_state_.emplace<Complete>(
+ Complete{ServerMetadataFromStatus(p->result)});
+ }
+ }
+ }
+
+ if (auto* p = absl::get_if<GotInitialMetadata>(&call_state_)) {
+ incoming_messages_ = &pipes_.client_to_server.sender;
+ auto promise = p->next_promise_factory(CallArgs{
+ std::move(p->client_initial_metadata),
+ &pipes_.server_initial_metadata.sender,
+ &pipes_.client_to_server.receiver, &pipes_.server_to_client.sender});
+ call_state_.emplace<MessageLoop>(
+ MessageLoop{&pipes_.server_to_client.receiver, std::move(promise)});
+ server_initial_metadata_
+ .emplace<PipeReceiverNextType<ServerMetadataHandle>>(
+ pipes_.server_initial_metadata.receiver.Next());
+ }
+ if (incoming_messages_ != nullptr) {
+ PollRecvMessage(incoming_messages_);
+ }
+ if (auto* p = absl::get_if<MessageLoop>(&call_state_)) {
+ if (absl::holds_alternative<ServerMetadataHandle>(
+ server_initial_metadata_)) {
+ PollSendMessage(p->outgoing_messages, nullptr);
+ }
+ auto poll = p->promise();
+ if (auto* r = poll.value_if_ready()) {
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO, "%s[connected] got trailing metadata %s; %s",
+ Activity::current()->DebugTag().c_str(),
+ (*r)->DebugString().c_str(), ActiveOpsString().c_str());
+ }
+ auto& completing = call_state_.emplace<Completing>();
+ completing.server_trailing_metadata = std::move(*r);
+ completing.on_complete =
+ MakeMemberClosure<ServerStream,
+ &ServerStream::SendTrailingMetadataDone>(this);
+ completing.waker = Activity::current()->MakeOwningWaker();
+ auto& op = completing.send_trailing_metadata;
+ memset(&op, 0, sizeof(op));
+ op.payload = batch_payload();
+ op.on_complete = &completing.on_complete;
+ // If we've gotten initial server metadata, we can send trailing
+ // metadata.
+ // Otherwise we need to cancel the call.
+ // There could be an unlucky ordering, so we poll here to make sure.
+ if (poll_send_initial_metadata()) {
+ op.send_trailing_metadata = true;
+ batch_payload()->send_trailing_metadata.send_trailing_metadata =
+ completing.server_trailing_metadata.get();
+ batch_payload()->send_trailing_metadata.sent = &completing.sent;
+ } else {
+ op.cancel_stream = true;
+ const auto status_code =
+ completing.server_trailing_metadata->get(GrpcStatusMetadata())
+ .value_or(GRPC_STATUS_UNKNOWN);
+ batch_payload()->cancel_stream.cancel_error = grpc_error_set_int(
+ absl::Status(static_cast<absl::StatusCode>(status_code),
+ completing.server_trailing_metadata
+ ->GetOrCreatePointer(GrpcMessageMetadata())
+ ->as_string_view()),
+ StatusIntProperty::kRpcStatus, status_code);
+ }
+ SchedulePush(&op);
+ }
+ }
+ if (auto* p = absl::get_if<Complete>(&call_state_)) {
+ set_finished();
+ return std::move(p->result);
+ }
+ return Pending{};
+ }
+
+ private:
+ // Call state: we've asked the transport for initial metadata and are
+ // waiting for it before proceeding.
+ struct GettingInitialMetadata {
+ explicit GettingInitialMetadata(ServerStream* stream)
+ : recv_initial_metadata_ready(
+ MakeMemberClosure<ServerStream,
+ &ServerStream::RecvInitialMetadataReady>(
+ stream)) {}
+ // The batch we're using to get initial metadata.
+ grpc_transport_stream_op_batch recv_initial_metadata;
+ // Waker to re-enter the activity once the transport returns.
+ Waker recv_initial_metadata_ready_waker;
+ // Initial metadata storage for the transport.
+ ClientMetadataHandle client_initial_metadata =
+ GetContext<Arena>()->MakePooled<ClientMetadata>(GetContext<Arena>());
+ // Closure for the transport to call when it's ready.
+ grpc_closure recv_initial_metadata_ready;
+ // Next promise factory to use once we have initial metadata.
+ NextPromiseFactory next_promise_factory;
+ };
+
+ // Call state: transport has returned initial metadata, we're waiting to
+ // re-enter the activity to process it.
+ struct GotInitialMetadata {
+ ClientMetadataHandle client_initial_metadata;
+ NextPromiseFactory next_promise_factory;
+ };
+
+ // Call state: we're sending/receiving messages and processing the filter
+ // stack.
+ struct MessageLoop {
+ PipeReceiver<MessageHandle>* outgoing_messages;
+ ArenaPromise<ServerMetadataHandle> promise;
+ };
+
+ // Call state: promise stack has returned trailing metadata, we're sending it
+ // to the transport to communicate.
+ struct Completing {
+ ServerMetadataHandle server_trailing_metadata;
+ grpc_transport_stream_op_batch send_trailing_metadata;
+ grpc_closure on_complete;
+ bool sent = false;
+ Waker waker;
+ };
+
+ // Call state: server metadata has been communicated to the transport and sent
+ // to the client.
+ // The metadata will be returned down to the server call to tick the
+ // cancellation bit or not on the originating batch.
+ struct Complete {
+ ServerMetadataHandle result;
+ };
+
+ // Trailing metadata state: we've asked the transport for trailing metadata
+ // and are waiting for it before proceeding.
+ struct WaitingForTrailingMetadata {
+ ClientMetadataHandle result =
+ GetContext<Arena>()->MakePooled<ClientMetadata>(GetContext<Arena>());
+ grpc_transport_stream_op_batch recv_trailing_metadata;
+ grpc_closure recv_trailing_metadata_ready;
+ Waker waker;
+ };
+
+ // We've received trailing metadata from the transport - which indicates reads
+ // are closed.
+ // We convert to an absl::Status here and use that to drive a decision to
+ // cancel the call (on error) or not.
+ struct GotClientHalfClose {
+ absl::Status result;
+ };
+
+ void RecvInitialMetadataReady(absl::Status status) {
+ MutexLock lock(mu());
+ auto& getting = absl::get<GettingInitialMetadata>(call_state_);
+ auto waker = std::move(getting.recv_initial_metadata_ready_waker);
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "%sGOT INITIAL METADATA: err=%s %s",
+ waker.ActivityDebugTag().c_str(), status.ToString().c_str(),
+ getting.client_initial_metadata->DebugString().c_str());
+ }
+ GotInitialMetadata got{std::move(getting.client_initial_metadata),
+ std::move(getting.next_promise_factory)};
+ call_state_.emplace<GotInitialMetadata>(std::move(got));
+ waker.Wakeup();
+ }
+
+ void SendTrailingMetadataDone(absl::Status result) {
+ MutexLock lock(mu());
+ auto& completing = absl::get<Completing>(call_state_);
+ auto md = std::move(completing.server_trailing_metadata);
+ auto waker = std::move(completing.waker);
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "%sSEND TRAILING METADATA DONE: err=%s sent=%s %s",
+ waker.ActivityDebugTag().c_str(), result.ToString().c_str(),
+ completing.sent ? "true" : "false", md->DebugString().c_str());
+ }
+ md->Set(GrpcStatusFromWire(), completing.sent);
+ if (!result.ok()) {
+ md->Clear();
+ md->Set(GrpcStatusMetadata(),
+ static_cast<grpc_status_code>(result.code()));
+ md->Set(GrpcMessageMetadata(), Slice::FromCopiedString(result.message()));
+ md->Set(GrpcStatusFromWire(), false);
+ }
+ call_state_.emplace<Complete>(Complete{std::move(md)});
+ waker.Wakeup();
+ }
+
+ std::string ActiveOpsString() const override
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu()) {
+ std::vector<std::string> ops;
+ ops.push_back(absl::StrCat(
+ "call_state:",
+ Match(
+ call_state_,
+ [](const absl::monostate&) { return "absl::monostate"; },
+ [](const GettingInitialMetadata&) { return "GETTING"; },
+ [](const GotInitialMetadata&) { return "GOT"; },
+ [](const MessageLoop&) { return "RUNNING"; },
+ [](const Completing&) { return "COMPLETING"; },
+ [](const Complete&) { return "COMPLETE"; })));
+ ops.push_back(
+ absl::StrCat("client_trailing_metadata_state:",
+ Match(
+ client_trailing_metadata_state_,
+ [](const absl::monostate&) -> std::string {
+ return "absl::monostate";
+ },
+ [](const WaitingForTrailingMetadata&) -> std::string {
+ return "WAITING";
+ },
+ [](const GotClientHalfClose& got) -> std::string {
+ return absl::StrCat("GOT:", got.result.ToString());
+ })));
+ // Send initial metadata
+ ops.push_back(absl::StrCat(
+ "server_initial_metadata_state:",
+ Match(
+ server_initial_metadata_,
+ [](const absl::monostate&) { return "absl::monostate"; },
+ [](const PipeReceiverNextType<ServerMetadataHandle>&) {
+ return "WAITING";
+ },
+ [](const ServerMetadataHandle&) { return "GOT"; })));
+ // Send message
+ std::string send_message_state = SendMessageString();
+ if (send_message_state != "WAITING") {
+ ops.push_back(absl::StrCat("send_message:", send_message_state));
+ }
+ // Receive message
+ std::string recv_message_state = RecvMessageString();
+ if (recv_message_state != "IDLE") {
+ ops.push_back(absl::StrCat("recv_message:", recv_message_state));
+ }
+ return absl::StrJoin(ops, " ");
+ }
+
+ void SendInitialMetadataDone() {}
+
+ void RecvTrailingMetadataReady(absl::Status error) {
+ MutexLock lock(mu());
+ auto& state =
+ absl::get<WaitingForTrailingMetadata>(client_trailing_metadata_state_);
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "%sRecvTrailingMetadataReady: error:%s metadata:%s state:%s",
+ state.waker.ActivityDebugTag().c_str(), error.ToString().c_str(),
+ state.result->DebugString().c_str(), ActiveOpsString().c_str());
+ }
+ auto waker = std::move(state.waker);
+ ServerMetadataHandle result = std::move(state.result);
+ if (error.ok()) {
+ auto* message = result->get_pointer(GrpcMessageMetadata());
+ error = absl::Status(
+ static_cast<absl::StatusCode>(
+ result->get(GrpcStatusMetadata()).value_or(GRPC_STATUS_UNKNOWN)),
+ message == nullptr ? "" : message->as_string_view());
+ }
+ client_trailing_metadata_state_.emplace<GotClientHalfClose>(
+ GotClientHalfClose{error});
+ waker.Wakeup();
+ }
+
+ struct Pipes {
Pipe<MessageHandle> server_to_client;
Pipe<MessageHandle> client_to_server;
Pipe<ServerMetadataHandle> server_initial_metadata;
- Latch<ServerMetadataHandle> failure_latch;
- bool sent_initial_metadata = false;
- bool sent_trailing_metadata = false;
};
- auto* call_data = GetContext<Arena>()->ManagedNew<CallData>();
- auto server_to_client_empty =
- call_data->server_to_client.receiver.AwaitEmpty();
+ using CallState =
+ absl::variant<absl::monostate, GettingInitialMetadata, GotInitialMetadata,
+ MessageLoop, Completing, Complete>;
+ CallState call_state_ ABSL_GUARDED_BY(mu()) = absl::monostate{};
+ using ClientTrailingMetadataState =
+ absl::variant<absl::monostate, WaitingForTrailingMetadata,
+ GotClientHalfClose>;
+ ClientTrailingMetadataState client_trailing_metadata_state_
+ ABSL_GUARDED_BY(mu()) = absl::monostate{};
+ absl::variant<absl::monostate, PipeReceiverNextType<ServerMetadataHandle>,
+ ServerMetadataHandle>
+ ABSL_GUARDED_BY(mu()) server_initial_metadata_ = absl::monostate{};
+ PipeSender<MessageHandle>* incoming_messages_ = nullptr;
+ grpc_transport_stream_op_batch send_initial_metadata_;
+ grpc_closure send_initial_metadata_done_ =
+ MakeMemberClosure<ServerStream, &ServerStream::SendInitialMetadataDone>(
+ this);
+ Pipes pipes_ ABSL_GUARDED_BY(mu());
+};
- // Create a promise that will receive client initial metadata, and then run
- // the main stem of the call (calling next_promise_factory up through the
- // filters).
- // Race the main call with failure_latch, allowing us to forcefully complete
- // the call in the case of a failure.
- auto recv_initial_metadata_then_run_promise =
- TrySeq(GetContext<BatchBuilder>()->ReceiveClientInitialMetadata(
- stream->batch_target()),
- [next_promise_factory = std::move(next_promise_factory),
- server_to_client_empty = std::move(server_to_client_empty),
- call_data](ClientMetadataHandle client_initial_metadata) {
- auto call_promise = next_promise_factory(CallArgs{
- std::move(client_initial_metadata),
- ClientInitialMetadataOutstandingToken::Empty(),
- &call_data->server_initial_metadata.sender,
- &call_data->client_to_server.receiver,
- &call_data->server_to_client.sender,
- });
- return Race(call_data->failure_latch.Wait(),
- [call_promise = std::move(call_promise),
- server_to_client_empty =
- std::move(server_to_client_empty)]() mutable
- -> Poll<ServerMetadataHandle> {
- // TODO(ctiller): this is deeply weird and we need
- // to clean this up.
- //
- // The following few lines check to ensure that
- // there's no message currently pending in the
- // outgoing message queue, and if (and only if)
- // that's true decides to poll the main promise to
- // see if there's a result.
- //
- // This essentially introduces a polling priority
- // scheme that makes the current promise structure
- // work out the way we want when talking to
- // transports.
- //
- // The problem is that transports are going to need
- // to replicate this structure when they convert to
- // promises, and that becomes troubling as we'll be
- // replicating weird throughout the stack.
- //
- // Instead we likely need to change the way we're
- // composing promises through the stack.
- //
- // Proposed is to change filters from a promise
- // that takes ClientInitialMetadata and returns
- // ServerTrailingMetadata with three pipes for
- // ServerInitialMetadata and
- // ClientToServerMessages, ServerToClientMessages.
- // Instead we'll have five pipes, moving
- // ClientInitialMetadata and ServerTrailingMetadata
- // to pipes that can be intercepted.
- //
- // The effect of this change will be to cripple the
- // things that can be done in a filter (but cripple
- // in line with what most filters actually do).
- // We'll likely need to add a `CallContext::Cancel`
- // to allow filters to cancel a request, but this
- // would also have the advantage of centralizing
- // our cancellation machinery which seems like an
- // additional win - with the net effect that the
- // shape of the call gets made explicit at the top
- // & bottom of the stack.
- //
- // There's a small set of filters (retry, this one,
- // lame client, clinet channel) that terminate
- // stacks and need a richer set of semantics, but
- // that ends up being fine because we can spawn
- // tasks in parties to handle those edge cases, and
- // keep the majority of filters simple: they just
- // call InterceptAndMap on a handful of filters at
- // call initialization time and then proceed to
- // actually filter.
- //
- // So that's the plan, why isn't it enacted here?
- //
- // Well, the plan ends up being easy to implement
- // in the promise based world (I did a prototype on
- // a branch in an afternoon). It's heinous to
- // implement in promise_based_filter, and that code
- // is load bearing for us at the time of writing.
- // It's not worth delaying promises for a further N
- // months (N ~ 6) to make that change.
- //
- // Instead, we'll move forward with this, get
- // promise_based_filter out of the picture, and
- // then during the mop-up phase for promises tweak
- // the compute structure to move to the magical
- // five pipes (I'm reminded of an old Onion
- // article), and end up in a good happy place.
- if (server_to_client_empty().pending()) {
- return Pending{};
- }
- return call_promise();
- });
- });
+class ServerConnectedCallPromise {
+ public:
+ ServerConnectedCallPromise(grpc_transport* transport,
+ NextPromiseFactory next_promise_factory)
+ : impl_(GetContext<Arena>()->New<ServerStream>(
+ transport, std::move(next_promise_factory))) {}
- // Promise factory that accepts a ServerMetadataHandle, and sends it as the
- // trailing metadata for this call.
- auto send_trailing_metadata =
- [call_data, stream = stream->InternalRef()](
- ServerMetadataHandle server_trailing_metadata) {
- return GetContext<BatchBuilder>()->SendServerTrailingMetadata(
- stream->batch_target(), std::move(server_trailing_metadata),
- !std::exchange(call_data->sent_initial_metadata, true));
- };
+ ServerConnectedCallPromise(const ServerConnectedCallPromise&) = delete;
+ ServerConnectedCallPromise& operator=(const ServerConnectedCallPromise&) =
+ delete;
+ ServerConnectedCallPromise(ServerConnectedCallPromise&& other) noexcept
+ : impl_(std::exchange(other.impl_, nullptr)) {}
+ ServerConnectedCallPromise& operator=(
+ ServerConnectedCallPromise&& other) noexcept {
+ impl_ = std::move(other.impl_);
+ return *this;
+ }
- // Runs the receive message loop, either until all the messages
- // are received or the server call is complete.
- party->Spawn(
- "recv_messages",
- Race(
- Map(stream->WaitFinished(), [](Empty) { return absl::OkStatus(); }),
- Map(stream->RecvMessages(&call_data->client_to_server.sender),
- [failure_latch = &call_data->failure_latch](absl::Status status) {
- if (!status.ok() && !failure_latch->is_set()) {
- failure_latch->Set(ServerMetadataFromStatus(status));
- }
- return status;
- })),
- [](absl::Status) {});
+ static ArenaPromise<ServerMetadataHandle> Make(grpc_transport* transport,
+ CallArgs,
+ NextPromiseFactory next) {
+ return ServerConnectedCallPromise(transport, std::move(next));
+ }
- // Run a promise that will send initial metadata (if that pipe sends some).
- // And then run the send message loop until that completes.
+ Poll<ServerMetadataHandle> operator()() { return impl_->PollOnce(); }
- auto send_initial_metadata = Seq(
- Race(Map(stream->WaitFinished(),
- [](Empty) { return NextResult<ServerMetadataHandle>(true); }),
- call_data->server_initial_metadata.receiver.Next()),
- [call_data, stream = stream->InternalRef()](
- NextResult<ServerMetadataHandle> next_result) mutable {
- auto md = !call_data->sent_initial_metadata && next_result.has_value()
- ? std::move(next_result.value())
- : nullptr;
- if (md != nullptr) {
- call_data->sent_initial_metadata = true;
- auto* party = static_cast<Party*>(Activity::current());
- party->Spawn("connected/send_initial_metadata",
- GetContext<BatchBuilder>()->SendServerInitialMetadata(
- stream->batch_target(), std::move(md)),
- [](absl::Status) {});
- return Immediate(absl::OkStatus());
- }
- return Immediate(absl::CancelledError());
- });
- party->Spawn(
- "send_initial_metadata_then_messages",
- Race(Map(stream->WaitFinished(), [](Empty) { return absl::OkStatus(); }),
- TrySeq(std::move(send_initial_metadata),
- stream->SendMessages(&call_data->server_to_client.receiver))),
- [](absl::Status) {});
-
- // Spawn a job to fetch the "client trailing metadata" - if this is OK then
- // it's client done, otherwise it's a signal of cancellation from the client
- // which we'll use failure_latch to signal.
-
- party->Spawn(
- "recv_trailing_metadata",
- Seq(GetContext<BatchBuilder>()->ReceiveClientTrailingMetadata(
- stream->batch_target()),
- [failure_latch = &call_data->failure_latch](
- absl::StatusOr<ClientMetadataHandle> status) mutable {
- if (grpc_call_trace.enabled()) {
- gpr_log(
- GPR_DEBUG,
- "%s[connected] Got trailing metadata; status=%s metadata=%s",
- Activity::current()->DebugTag().c_str(),
- status.status().ToString().c_str(),
- status.ok() ? (*status)->DebugString().c_str() : "<none>");
- }
- ClientMetadataHandle trailing_metadata;
- if (status.ok()) {
- trailing_metadata = std::move(*status);
- } else {
- trailing_metadata =
- GetContext<Arena>()->MakePooled<ClientMetadata>(
- GetContext<Arena>());
- grpc_status_code status_code = GRPC_STATUS_UNKNOWN;
- std::string message;
- grpc_error_get_status(status.status(), Timestamp::InfFuture(),
- &status_code, &message, nullptr, nullptr);
- trailing_metadata->Set(GrpcStatusMetadata(), status_code);
- trailing_metadata->Set(GrpcMessageMetadata(),
- Slice::FromCopiedString(message));
- }
- if (trailing_metadata->get(GrpcStatusMetadata())
- .value_or(GRPC_STATUS_UNKNOWN) != GRPC_STATUS_OK) {
- if (!failure_latch->is_set()) {
- failure_latch->Set(std::move(trailing_metadata));
- }
- }
- return Empty{};
- }),
- [](Empty) {});
-
- // Finally assemble the main call promise:
- // Receive initial metadata from the client and start the promise up the
- // filter stack.
- // Upon completion, send trailing metadata to the client and then return it
- // (allowing the call code to decide on what signalling to give the
- // application).
-
- return Map(Seq(std::move(recv_initial_metadata_then_run_promise),
- std::move(send_trailing_metadata)),
- [stream = std::move(stream)](ServerMetadataHandle md) {
- stream->set_finished();
- return md;
- });
-}
+ private:
+ OrphanablePtr<ServerStream> impl_;
+};
#endif
template <ArenaPromise<ServerMetadataHandle> (*make_call_promise)(
grpc_transport*, CallArgs, NextPromiseFactory)>
grpc_channel_filter MakeConnectedFilter() {
// Create a vtable that contains both the legacy call methods (for filter
- // stack based calls) and the new promise based method for creating
- // promise based calls (the latter iff make_call_promise != nullptr). In
- // this way the filter can be inserted into either kind of channel stack,
- // and only if all the filters in the stack are promise based will the
- // call be promise based.
+ // stack based calls) and the new promise based method for creating promise
+ // based calls (the latter iff make_call_promise != nullptr).
+ // In this way the filter can be inserted into either kind of channel stack,
+ // and only if all the filters in the stack are promise based will the call
+ // be promise based.
auto make_call_wrapper = +[](grpc_channel_element* elem, CallArgs call_args,
NextPromiseFactory next) {
grpc_transport* transport =
@@ -804,11 +1367,12 @@
sizeof(channel_data),
connected_channel_init_channel_elem,
+[](grpc_channel_stack* channel_stack, grpc_channel_element* elem) {
- // HACK(ctiller): increase call stack size for the channel to make
- // space for channel data. We need a cleaner (but performant) way to
- // do this, and I'm not sure what that is yet. This is only "safe"
- // because call stacks place no additional data after the last call
- // element, and the last call element MUST be the connected channel.
+ // HACK(ctiller): increase call stack size for the channel to make space
+ // for channel data. We need a cleaner (but performant) way to do this,
+ // and I'm not sure what that is yet.
+ // This is only "safe" because call stacks place no additional data
+ // after the last call element, and the last call element MUST be the
+ // connected channel.
channel_stack->call_stack_size += grpc_transport_stream_size(
static_cast<channel_data*>(elem->channel_data)->transport);
},
@@ -828,7 +1392,7 @@
#ifdef GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_CLIENT_CALL
const grpc_channel_filter kClientEmulatedFilter =
- MakeConnectedFilter<MakeClientCallPromise>();
+ MakeConnectedFilter<ClientConnectedCallPromise::Make>();
#else
const grpc_channel_filter kClientEmulatedFilter =
MakeConnectedFilter<nullptr>();
@@ -836,7 +1400,7 @@
#ifdef GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_SERVER_CALL
const grpc_channel_filter kServerEmulatedFilter =
- MakeConnectedFilter<MakeServerCallPromise>();
+ MakeConnectedFilter<ServerConnectedCallPromise::Make>();
#else
const grpc_channel_filter kServerEmulatedFilter =
MakeConnectedFilter<nullptr>();
@@ -852,20 +1416,20 @@
// We can't know promise based call or not here (that decision needs the
// collaboration of all of the filters on the channel, and we don't want
// ordering constraints on when we add filters).
- // We can know if this results in a promise based call how we'll create
- // our promise (if indeed we can), and so that is the choice made here.
+ // We can know if this results in a promise based call how we'll create our
+ // promise (if indeed we can), and so that is the choice made here.
if (t->vtable->make_call_promise != nullptr) {
- // Option 1, and our ideal: the transport supports promise based calls,
- // and so we simply use the transport directly.
+ // Option 1, and our ideal: the transport supports promise based calls, and
+ // so we simply use the transport directly.
builder->AppendFilter(&grpc_core::kPromiseBasedTransportFilter);
} else if (grpc_channel_stack_type_is_client(builder->channel_stack_type())) {
- // Option 2: the transport does not support promise based calls, but
- // we're on the client and so we have an implementation that we can use
- // to convert to batches.
+ // Option 2: the transport does not support promise based calls, but we're
+ // on the client and so we have an implementation that we can use to convert
+ // to batches.
builder->AppendFilter(&grpc_core::kClientEmulatedFilter);
} else {
- // Option 3: the transport does not support promise based calls, and
- // we're on the server so we use the server filter.
+ // Option 3: the transport does not support promise based calls, and we're
+ // on the server so we use the server filter.
builder->AppendFilter(&grpc_core::kServerEmulatedFilter);
}
return true;
diff --git a/src/core/lib/channel/promise_based_filter.cc b/src/core/lib/channel/promise_based_filter.cc
index 9082022..d44a992 100644
--- a/src/core/lib/channel/promise_based_filter.cc
+++ b/src/core/lib/channel/promise_based_filter.cc
@@ -16,8 +16,6 @@
#include "src/core/lib/channel/promise_based_filter.h"
-#include <inttypes.h>
-
#include <algorithm>
#include <initializer_list>
#include <memory>
@@ -54,7 +52,7 @@
explicit FakeActivity(Activity* wake_activity)
: wake_activity_(wake_activity) {}
void Orphan() override {}
- void ForceImmediateRepoll(WakeupMask) override {}
+ void ForceImmediateRepoll() override {}
Waker MakeOwningWaker() override { return wake_activity_->MakeOwningWaker(); }
Waker MakeNonOwningWaker() override {
return wake_activity_->MakeNonOwningWaker();
@@ -138,22 +136,20 @@
Waker BaseCallData::MakeOwningWaker() {
GRPC_CALL_STACK_REF(call_stack_, "waker");
- return Waker(this, 0);
+ return Waker(this, nullptr);
}
-void BaseCallData::Wakeup(WakeupMask) {
+void BaseCallData::Wakeup(void*) {
auto wakeup = [](void* p, grpc_error_handle) {
auto* self = static_cast<BaseCallData*>(p);
self->OnWakeup();
- self->Drop(0);
+ self->Drop(nullptr);
};
auto* closure = GRPC_CLOSURE_CREATE(wakeup, this, nullptr);
GRPC_CALL_COMBINER_START(call_combiner_, closure, absl::OkStatus(), "wakeup");
}
-void BaseCallData::Drop(WakeupMask) {
- GRPC_CALL_STACK_UNREF(call_stack_, "waker");
-}
+void BaseCallData::Drop(void*) { GRPC_CALL_STACK_UNREF(call_stack_, "waker"); }
std::string BaseCallData::LogTag() const {
return absl::StrCat(
@@ -221,7 +217,7 @@
// refcnt==0 ==> cancelled
if (grpc_trace_channel.enabled()) {
gpr_log(GPR_INFO, "%sRESUME BATCH REQUEST CANCELLED",
- releaser->call()->DebugTag().c_str());
+ Activity::current()->DebugTag().c_str());
}
return;
}
@@ -245,10 +241,6 @@
auto* batch = std::exchange(batch_, nullptr);
GPR_ASSERT(batch != nullptr);
uintptr_t& refcnt = *RefCountField(batch);
- gpr_log(GPR_DEBUG, "%sCancelWith: %p refs=%" PRIdPTR " err=%s [%s]",
- releaser->call()->DebugTag().c_str(), batch, refcnt,
- error.ToString().c_str(),
- grpc_transport_stream_op_batch_string(batch, false).c_str());
if (refcnt == 0) {
// refcnt==0 ==> cancelled
if (grpc_trace_channel.enabled()) {
@@ -339,8 +331,6 @@
return "CANCELLED";
case State::kCancelledButNotYetPolled:
return "CANCELLED_BUT_NOT_YET_POLLED";
- case State::kCancelledButNoStatus:
- return "CANCELLED_BUT_NO_STATUS";
}
return "UNKNOWN";
}
@@ -365,7 +355,6 @@
Crash(absl::StrFormat("ILLEGAL STATE: %s", StateString(state_)));
case State::kCancelled:
case State::kCancelledButNotYetPolled:
- case State::kCancelledButNoStatus:
return;
}
batch_ = batch;
@@ -393,7 +382,6 @@
case State::kForwardedBatch:
case State::kBatchCompleted:
case State::kPushedToPipe:
- case State::kCancelledButNoStatus:
Crash(absl::StrFormat("ILLEGAL STATE: %s", StateString(state_)));
case State::kCancelled:
case State::kCancelledButNotYetPolled:
@@ -409,7 +397,6 @@
case State::kForwardedBatch:
case State::kCancelled:
case State::kCancelledButNotYetPolled:
- case State::kCancelledButNoStatus:
return true;
case State::kGotBatchNoPipe:
case State::kGotBatch:
@@ -438,7 +425,6 @@
break;
case State::kCancelled:
case State::kCancelledButNotYetPolled:
- case State::kCancelledButNoStatus:
flusher.AddClosure(intercepted_on_complete_, status,
"forward after cancel");
break;
@@ -463,14 +449,10 @@
case State::kCancelledButNotYetPolled:
break;
case State::kInitial:
- state_ = State::kCancelled;
- break;
case State::kIdle:
case State::kForwardedBatch:
state_ = State::kCancelledButNotYetPolled;
- if (base_->is_current()) base_->ForceImmediateRepoll();
break;
- case State::kCancelledButNoStatus:
case State::kGotBatchNoPipe:
case State::kGotBatch: {
std::string temp;
@@ -489,7 +471,6 @@
push_.reset();
next_.reset();
state_ = State::kCancelledButNotYetPolled;
- if (base_->is_current()) base_->ForceImmediateRepoll();
break;
}
}
@@ -508,7 +489,6 @@
case State::kIdle:
case State::kGotBatchNoPipe:
case State::kCancelled:
- case State::kCancelledButNoStatus:
break;
case State::kCancelledButNotYetPolled:
interceptor()->Push()->Close();
@@ -550,18 +530,13 @@
"result.has_value=%s",
base_->LogTag().c_str(), p->has_value() ? "true" : "false");
}
- if (p->has_value()) {
- batch_->payload->send_message.send_message->Swap((**p)->payload());
- batch_->payload->send_message.flags = (**p)->flags();
- state_ = State::kForwardedBatch;
- batch_.ResumeWith(flusher);
- next_.reset();
- if ((*push_)().ready()) push_.reset();
- } else {
- state_ = State::kCancelledButNoStatus;
- next_.reset();
- push_.reset();
- }
+ GPR_ASSERT(p->has_value());
+ batch_->payload->send_message.send_message->Swap((**p)->payload());
+ batch_->payload->send_message.flags = (**p)->flags();
+ state_ = State::kForwardedBatch;
+ batch_.ResumeWith(flusher);
+ next_.reset();
+ if ((*push_)().ready()) push_.reset();
}
} break;
case State::kForwardedBatch:
@@ -1119,14 +1094,11 @@
// Poll the promise once since we're waiting for it.
Poll<ServerMetadataHandle> poll = self_->promise_();
if (grpc_trace_channel.enabled()) {
- gpr_log(GPR_INFO, "%s ClientCallData.PollContext.Run: poll=%s; %s",
+ gpr_log(GPR_INFO, "%s ClientCallData.PollContext.Run: poll=%s",
self_->LogTag().c_str(),
- PollToString(poll,
- [](const ServerMetadataHandle& h) {
- return h->DebugString();
- })
- .c_str(),
- self_->DebugString().c_str());
+ PollToString(poll, [](const ServerMetadataHandle& h) {
+ return h->DebugString();
+ }).c_str());
}
if (auto* r = poll.value_if_ready()) {
auto md = std::move(*r);
@@ -1306,11 +1278,7 @@
[args]() {
return args->arena->New<ReceiveInterceptor>(args->arena);
},
- [args]() { return args->arena->New<SendInterceptor>(args->arena); }),
- initial_metadata_outstanding_token_(
- (flags & kFilterIsLast) != 0
- ? ClientInitialMetadataOutstandingToken::New(arena())
- : ClientInitialMetadataOutstandingToken::Empty()) {
+ [args]() { return args->arena->New<SendInterceptor>(args->arena); }) {
GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready_,
RecvTrailingMetadataReadyCallback, this,
grpc_schedule_on_exec_ctx);
@@ -1326,12 +1294,8 @@
}
}
-std::string ClientCallData::DebugTag() const {
- return absl::StrFormat("PBF_CLIENT[%p]: [%s] ", this, elem()->filter->name);
-}
-
// Activity implementation.
-void ClientCallData::ForceImmediateRepoll(WakeupMask) {
+void ClientCallData::ForceImmediateRepoll() {
GPR_ASSERT(poll_ctx_ != nullptr);
poll_ctx_->Repoll();
}
@@ -1583,7 +1547,6 @@
promise_ = filter->MakeCallPromise(
CallArgs{WrapMetadata(send_initial_metadata_batch_->payload
->send_initial_metadata.send_initial_metadata),
- std::move(initial_metadata_outstanding_token_),
server_initial_metadata_pipe() == nullptr
? nullptr
: &server_initial_metadata_pipe()->sender,
@@ -1691,7 +1654,8 @@
GPR_ASSERT(poll_ctx_ != nullptr);
GPR_ASSERT(send_initial_state_ == SendInitialState::kQueued);
send_initial_metadata_batch_->payload->send_initial_metadata
- .send_initial_metadata = call_args.client_initial_metadata.get();
+ .send_initial_metadata =
+ UnwrapMetadata(std::move(call_args.client_initial_metadata));
if (recv_initial_metadata_ != nullptr) {
// Call args should contain a latch for receiving initial metadata.
// It might be the one we passed in - in which case we know this filter
@@ -1903,15 +1867,8 @@
class ServerCallData::PollContext {
public:
- explicit PollContext(ServerCallData* self, Flusher* flusher,
- DebugLocation created = DebugLocation())
- : self_(self), flusher_(flusher), created_(created) {
- if (self_->poll_ctx_ != nullptr) {
- Crash(absl::StrCat(
- "PollContext: disallowed recursion. New: ", created_.file(), ":",
- created_.line(), "; Old: ", self_->poll_ctx_->created_.file(), ":",
- self_->poll_ctx_->created_.line()));
- }
+ explicit PollContext(ServerCallData* self, Flusher* flusher)
+ : self_(self), flusher_(flusher) {
GPR_ASSERT(self_->poll_ctx_ == nullptr);
self_->poll_ctx_ = this;
scoped_activity_.Init(self_);
@@ -1957,7 +1914,6 @@
Flusher* const flusher_;
bool repoll_ = false;
bool have_scoped_activity_;
- GPR_NO_UNIQUE_ADDRESS DebugLocation created_;
};
const char* ServerCallData::StateString(RecvInitialState state) {
@@ -2017,18 +1973,11 @@
gpr_log(GPR_INFO, "%s ~ServerCallData %s", LogTag().c_str(),
DebugString().c_str());
}
- if (send_initial_metadata_ != nullptr) {
- send_initial_metadata_->~SendInitialMetadata();
- }
GPR_ASSERT(poll_ctx_ == nullptr);
}
-std::string ServerCallData::DebugTag() const {
- return absl::StrFormat("PBF_SERVER[%p]: [%s] ", this, elem()->filter->name);
-}
-
// Activity implementation.
-void ServerCallData::ForceImmediateRepoll(WakeupMask) {
+void ServerCallData::ForceImmediateRepoll() {
GPR_ASSERT(poll_ctx_ != nullptr);
poll_ctx_->Repoll();
}
@@ -2134,10 +2083,7 @@
switch (send_trailing_state_) {
case SendTrailingState::kInitial:
send_trailing_metadata_batch_ = batch;
- if (receive_message() != nullptr &&
- batch->payload->send_trailing_metadata.send_trailing_metadata
- ->get(GrpcStatusMetadata())
- .value_or(GRPC_STATUS_UNKNOWN) != GRPC_STATUS_OK) {
+ if (receive_message() != nullptr) {
receive_message()->Done(
*batch->payload->send_trailing_metadata.send_trailing_metadata,
&flusher);
@@ -2194,12 +2140,9 @@
case SendTrailingState::kForwarded:
send_trailing_state_ = SendTrailingState::kCancelled;
if (!error.ok()) {
- call_stack()->IncrementRefCount();
auto* batch = grpc_make_transport_stream_op(
- NewClosure([call_combiner = call_combiner(),
- call_stack = call_stack()](absl::Status) {
+ NewClosure([call_combiner = call_combiner()](absl::Status) {
GRPC_CALL_COMBINER_STOP(call_combiner, "done-cancel");
- call_stack->Unref();
}));
batch->cancel_stream = true;
batch->payload->cancel_stream.cancel_error = error;
@@ -2251,7 +2194,7 @@
ArenaPromise<ServerMetadataHandle> ServerCallData::MakeNextPromise(
CallArgs call_args) {
GPR_ASSERT(recv_initial_state_ == RecvInitialState::kComplete);
- GPR_ASSERT(std::move(call_args.client_initial_metadata).get() ==
+ GPR_ASSERT(UnwrapMetadata(std::move(call_args.client_initial_metadata)) ==
recv_initial_metadata_);
forward_recv_initial_metadata_callback_ = true;
if (send_initial_metadata_ != nullptr) {
@@ -2373,7 +2316,6 @@
FakeActivity(this).Run([this, filter] {
promise_ = filter->MakeCallPromise(
CallArgs{WrapMetadata(recv_initial_metadata_),
- ClientInitialMetadataOutstandingToken::Empty(),
server_initial_metadata_pipe() == nullptr
? nullptr
: &server_initial_metadata_pipe()->sender,
@@ -2474,14 +2416,9 @@
(send_trailing_metadata_batch_->send_message &&
send_message()->IsForwarded()))) {
send_trailing_state_ = SendTrailingState::kQueued;
- if (send_trailing_metadata_batch_->payload->send_trailing_metadata
- .send_trailing_metadata->get(GrpcStatusMetadata())
- .value_or(GRPC_STATUS_UNKNOWN) != GRPC_STATUS_OK) {
- send_message()->Done(
- *send_trailing_metadata_batch_->payload->send_trailing_metadata
- .send_trailing_metadata,
- flusher);
- }
+ send_message()->Done(*send_trailing_metadata_batch_->payload
+ ->send_trailing_metadata.send_trailing_metadata,
+ flusher);
}
}
if (receive_message() != nullptr) {
@@ -2532,7 +2469,8 @@
}
if (auto* r = poll.value_if_ready()) {
promise_ = ArenaPromise<ServerMetadataHandle>();
- auto md = std::move(*r);
+ auto* md = UnwrapMetadata(std::move(*r));
+ bool destroy_md = true;
if (send_message() != nullptr) {
send_message()->Done(*md, flusher);
}
@@ -2544,9 +2482,11 @@
case SendTrailingState::kQueuedButHaventClosedSends:
case SendTrailingState::kQueued: {
if (send_trailing_metadata_batch_->payload->send_trailing_metadata
- .send_trailing_metadata != md.get()) {
+ .send_trailing_metadata != md) {
*send_trailing_metadata_batch_->payload->send_trailing_metadata
.send_trailing_metadata = std::move(*md);
+ } else {
+ destroy_md = false;
}
send_trailing_metadata_batch_.ResumeWith(flusher);
send_trailing_state_ = SendTrailingState::kForwarded;
@@ -2564,6 +2504,9 @@
// Nothing to do.
break;
}
+ if (destroy_md) {
+ md->~grpc_metadata_batch();
+ }
}
}
if (std::exchange(forward_recv_initial_metadata_callback_, false)) {
diff --git a/src/core/lib/channel/promise_based_filter.h b/src/core/lib/channel/promise_based_filter.h
index 78d413d..e5ec0b2 100644
--- a/src/core/lib/channel/promise_based_filter.h
+++ b/src/core/lib/channel/promise_based_filter.h
@@ -184,7 +184,7 @@
Waker MakeNonOwningWaker() final;
Waker MakeOwningWaker() final;
- std::string ActivityDebugTag(WakeupMask) const override { return DebugTag(); }
+ std::string ActivityDebugTag(void*) const override { return DebugTag(); }
void Finalize(const grpc_call_final_info* final_info) {
finalization_.Run(final_info);
@@ -222,11 +222,7 @@
void Resume(grpc_transport_stream_op_batch* batch) {
GPR_ASSERT(!call_->is_last());
- if (batch->HasOp()) {
- release_.push_back(batch);
- } else if (batch->on_complete != nullptr) {
- Complete(batch);
- }
+ release_.push_back(batch);
}
void Cancel(grpc_transport_stream_op_batch* batch,
@@ -245,8 +241,6 @@
call_closures_.Add(closure, error, reason);
}
- BaseCallData* call() const { return call_; }
-
private:
absl::InlinedVector<grpc_transport_stream_op_batch*, 1> release_;
CallCombinerClosureList call_closures_;
@@ -290,6 +284,11 @@
Arena::PooledDeleter(nullptr));
}
+ static grpc_metadata_batch* UnwrapMetadata(
+ Arena::PoolPtr<grpc_metadata_batch> p) {
+ return p.release();
+ }
+
class ReceiveInterceptor final : public Interceptor {
public:
explicit ReceiveInterceptor(Arena* arena) : pipe_{arena} {}
@@ -403,8 +402,6 @@
kCancelledButNotYetPolled,
// We're done.
kCancelled,
- // We're done, but we haven't gotten a status yet
- kCancelledButNoStatus,
};
static const char* StateString(State);
@@ -545,8 +542,8 @@
private:
// Wakeable implementation.
- void Wakeup(WakeupMask) final;
- void Drop(WakeupMask) final;
+ void Wakeup(void*) final;
+ void Drop(void*) final;
virtual void OnWakeup() = 0;
@@ -572,12 +569,10 @@
~ClientCallData() override;
// Activity implementation.
- void ForceImmediateRepoll(WakeupMask) final;
+ void ForceImmediateRepoll() final;
// Handle one grpc_transport_stream_op_batch
void StartBatch(grpc_transport_stream_op_batch* batch) override;
- std::string DebugTag() const override;
-
private:
// At what stage is our handling of send initial metadata?
enum class SendInitialState {
@@ -674,8 +669,6 @@
RecvTrailingState recv_trailing_state_ = RecvTrailingState::kInitial;
// Polling related data. Non-null if we're actively polling
PollContext* poll_ctx_ = nullptr;
- // Initial metadata outstanding token
- ClientInitialMetadataOutstandingToken initial_metadata_outstanding_token_;
};
class ServerCallData : public BaseCallData {
@@ -685,12 +678,10 @@
~ServerCallData() override;
// Activity implementation.
- void ForceImmediateRepoll(WakeupMask) final;
+ void ForceImmediateRepoll() final;
// Handle one grpc_transport_stream_op_batch
void StartBatch(grpc_transport_stream_op_batch* batch) override;
- std::string DebugTag() const override;
-
protected:
absl::string_view ClientOrServerString() const override { return "SVR"; }
diff --git a/src/core/lib/gprpp/orphanable.h b/src/core/lib/gprpp/orphanable.h
index a2f24cb..b9b2913 100644
--- a/src/core/lib/gprpp/orphanable.h
+++ b/src/core/lib/gprpp/orphanable.h
@@ -69,7 +69,7 @@
}
// A type of Orphanable with internal ref-counting.
-template <typename Child, typename UnrefBehavior = UnrefDelete>
+template <typename Child, UnrefBehavior UnrefBehaviorArg = kUnrefDelete>
class InternallyRefCounted : public Orphanable {
public:
// Not copyable nor movable.
@@ -99,12 +99,12 @@
void Unref() {
if (GPR_UNLIKELY(refs_.Unref())) {
- unref_behavior_(static_cast<Child*>(this));
+ internal::Delete<Child, UnrefBehaviorArg>(static_cast<Child*>(this));
}
}
void Unref(const DebugLocation& location, const char* reason) {
if (GPR_UNLIKELY(refs_.Unref(location, reason))) {
- unref_behavior_(static_cast<Child*>(this));
+ internal::Delete<Child, UnrefBehaviorArg>(static_cast<Child*>(this));
}
}
@@ -115,7 +115,6 @@
}
RefCount refs_;
- GPR_NO_UNIQUE_ADDRESS UnrefBehavior unref_behavior_;
};
} // namespace grpc_core
diff --git a/src/core/lib/gprpp/ref_counted.h b/src/core/lib/gprpp/ref_counted.h
index 96fe288..0667919 100644
--- a/src/core/lib/gprpp/ref_counted.h
+++ b/src/core/lib/gprpp/ref_counted.h
@@ -213,34 +213,41 @@
};
// Behavior of RefCounted<> upon ref count reaching 0.
-
-// Default behavior: Delete the object.
-struct UnrefDelete {
- template <typename T>
- void operator()(T* p) {
- delete p;
- }
+enum UnrefBehavior {
+ // Default behavior: Delete the object.
+ kUnrefDelete,
+ // Do not delete the object upon unref. This is useful in cases where all
+ // existing objects must be tracked in a registry but the object's entry in
+ // the registry cannot be removed from the object's dtor due to
+ // synchronization issues. In this case, the registry can be cleaned up
+ // later by identifying entries for which RefIfNonZero() returns null.
+ kUnrefNoDelete,
+ // Call the object's dtor but do not delete it. This is useful for cases
+ // where the object is stored in memory allocated elsewhere (e.g., the call
+ // arena).
+ kUnrefCallDtor,
};
-// Do not delete the object upon unref. This is useful in cases where all
-// existing objects must be tracked in a registry but the object's entry in
-// the registry cannot be removed from the object's dtor due to
-// synchronization issues. In this case, the registry can be cleaned up
-// later by identifying entries for which RefIfNonZero() returns null.
-struct UnrefNoDelete {
- template <typename T>
- void operator()(T* /*p*/) {}
-};
+namespace internal {
+template <typename T, UnrefBehavior UnrefBehaviorArg>
+class Delete;
-// Call the object's dtor but do not delete it. This is useful for cases
-// where the object is stored in memory allocated elsewhere (e.g., the call
-// arena).
-struct UnrefCallDtor {
- template <typename T>
- void operator()(T* p) {
- p->~T();
- }
+template <typename T>
+class Delete<T, kUnrefDelete> {
+ public:
+ explicit Delete(T* t) { delete t; }
};
+template <typename T>
+class Delete<T, kUnrefNoDelete> {
+ public:
+ explicit Delete(T* /*t*/) {}
+};
+template <typename T>
+class Delete<T, kUnrefCallDtor> {
+ public:
+ explicit Delete(T* t) { t->~T(); }
+};
+} // namespace internal
// A base class for reference-counted objects.
// New objects should be created via new and start with a refcount of 1.
@@ -269,7 +276,7 @@
// ch->Unref();
//
template <typename Child, typename Impl = PolymorphicRefCount,
- typename UnrefBehavior = UnrefDelete>
+ UnrefBehavior UnrefBehaviorArg = kUnrefDelete>
class RefCounted : public Impl {
public:
using RefCountedChildType = Child;
@@ -294,12 +301,12 @@
// friend of this class.
void Unref() {
if (GPR_UNLIKELY(refs_.Unref())) {
- unref_behavior_(static_cast<Child*>(this));
+ internal::Delete<Child, UnrefBehaviorArg>(static_cast<Child*>(this));
}
}
void Unref(const DebugLocation& location, const char* reason) {
if (GPR_UNLIKELY(refs_.Unref(location, reason))) {
- unref_behavior_(static_cast<Child*>(this));
+ internal::Delete<Child, UnrefBehaviorArg>(static_cast<Child*>(this));
}
}
@@ -324,11 +331,6 @@
intptr_t initial_refcount = 1)
: refs_(initial_refcount, trace) {}
- // Note: Tracing is a no-op on non-debug builds.
- explicit RefCounted(UnrefBehavior b, const char* trace = nullptr,
- intptr_t initial_refcount = 1)
- : refs_(initial_refcount, trace), unref_behavior_(b) {}
-
private:
// Allow RefCountedPtr<> to access IncrementRefCount().
template <typename T>
@@ -340,7 +342,6 @@
}
RefCount refs_;
- GPR_NO_UNIQUE_ADDRESS UnrefBehavior unref_behavior_;
};
} // namespace grpc_core
diff --git a/src/core/lib/gprpp/thd.h b/src/core/lib/gprpp/thd.h
index a2d9101..16a9188 100644
--- a/src/core/lib/gprpp/thd.h
+++ b/src/core/lib/gprpp/thd.h
@@ -25,11 +25,6 @@
#include <stddef.h>
-#include <memory>
-#include <utility>
-
-#include "absl/functional/any_invocable.h"
-
#include <grpc/support/log.h>
namespace grpc_core {
@@ -91,17 +86,6 @@
Thread(const char* thd_name, void (*thd_body)(void* arg), void* arg,
bool* success = nullptr, const Options& options = Options());
- Thread(const char* thd_name, absl::AnyInvocable<void()> fn,
- bool* success = nullptr, const Options& options = Options())
- : Thread(
- thd_name,
- [](void* p) {
- std::unique_ptr<absl::AnyInvocable<void()>> fn_from_p(
- static_cast<absl::AnyInvocable<void()>*>(p));
- (*fn_from_p)();
- },
- new absl::AnyInvocable<void()>(std::move(fn)), success, options) {}
-
/// Move constructor for thread. After this is called, the other thread
/// no longer represents a living thread object
Thread(Thread&& other) noexcept
diff --git a/src/core/lib/iomgr/call_combiner.h b/src/core/lib/iomgr/call_combiner.h
index e314479..50aeb63 100644
--- a/src/core/lib/iomgr/call_combiner.h
+++ b/src/core/lib/iomgr/call_combiner.h
@@ -171,8 +171,8 @@
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO,
"CallCombinerClosureList executing closure while already "
- "holding call_combiner %p: closure=%s error=%s reason=%s",
- call_combiner, closures_[0].closure->DebugString().c_str(),
+ "holding call_combiner %p: closure=%p error=%s reason=%s",
+ call_combiner, closures_[0].closure,
StatusToString(closures_[0].error).c_str(), closures_[0].reason);
}
// This will release the call combiner.
diff --git a/src/core/lib/promise/activity.cc b/src/core/lib/promise/activity.cc
index b982b84..da009f9 100644
--- a/src/core/lib/promise/activity.cc
+++ b/src/core/lib/promise/activity.cc
@@ -19,11 +19,8 @@
#include <stddef.h>
#include <initializer_list>
-#include <vector>
-#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
-#include "absl/strings/str_join.h"
#include "src/core/lib/gprpp/atomic_utils.h"
@@ -39,9 +36,7 @@
///////////////////////////////////////////////////////////////////////////////
// HELPER TYPES
-std::string Unwakeable::ActivityDebugTag(WakeupMask) const {
- return "<unknown>";
-}
+std::string Unwakeable::ActivityDebugTag(void*) const { return "<unknown>"; }
// Weak handle to an Activity.
// Handle can persist while Activity goes away.
@@ -63,7 +58,7 @@
// Activity needs to wake up (if it still exists!) - wake it up, and drop the
// ref that was kept for this handle.
- void Wakeup(WakeupMask) override ABSL_LOCKS_EXCLUDED(mu_) {
+ void Wakeup(void*) override ABSL_LOCKS_EXCLUDED(mu_) {
mu_.Lock();
// Note that activity refcount can drop to zero, but we could win the lock
// against DropActivity, so we need to only increase activities refcount if
@@ -73,7 +68,7 @@
mu_.Unlock();
// Activity still exists and we have a reference: wake it up, which will
// drop the ref.
- activity->Wakeup(0);
+ activity->Wakeup(nullptr);
} else {
// Could not get the activity - it's either gone or going. No need to wake
// it up!
@@ -83,9 +78,9 @@
Unref();
}
- void Drop(WakeupMask) override { Unref(); }
+ void Drop(void*) override { Unref(); }
- std::string ActivityDebugTag(WakeupMask) const override {
+ std::string ActivityDebugTag(void*) const override {
MutexLock lock(&mu_);
return activity_ == nullptr ? "<unknown>" : activity_->DebugTag();
}
@@ -129,7 +124,7 @@
Waker FreestandingActivity::MakeNonOwningWaker() {
mu_.AssertHeld();
- return Waker(RefHandle(), 0);
+ return Waker(RefHandle(), nullptr);
}
} // namespace promise_detail
@@ -138,15 +133,4 @@
return absl::StrFormat("ACTIVITY[%p]", this);
}
-///////////////////////////////////////////////////////////////////////////////
-// INTRA ACTIVITY WAKER IMPLEMENTATION
-
-std::string IntraActivityWaiter::DebugString() const {
- std::vector<int> bits;
- for (size_t i = 0; i < 8 * sizeof(WakeupMask); i++) {
- if (wakeups_ & (1 << i)) bits.push_back(i);
- }
- return absl::StrCat("{", absl::StrJoin(bits, ","), "}");
-}
-
} // namespace grpc_core
diff --git a/src/core/lib/promise/activity.h b/src/core/lib/promise/activity.h
index 8e198f0..67933de 100644
--- a/src/core/lib/promise/activity.h
+++ b/src/core/lib/promise/activity.h
@@ -38,29 +38,24 @@
#include "src/core/lib/promise/context.h"
#include "src/core/lib/promise/detail/promise_factory.h"
#include "src/core/lib/promise/detail/status.h"
-#include "src/core/lib/promise/poll.h"
namespace grpc_core {
class Activity;
-// WakeupMask is a bitfield representing which parts of an activity should be
-// woken up.
-using WakeupMask = uint16_t;
-
// A Wakeable object is used by queues to wake activities.
class Wakeable {
public:
// Wake up the underlying activity.
// After calling, this Wakeable cannot be used again.
- // WakeupMask comes from the activity that created this Wakeable and specifies
- // the set of promises that should be awoken.
- virtual void Wakeup(WakeupMask wakeup_mask) = 0;
+ // arg comes from the Waker object and allows one Wakeable instance to be used
+ // for multiple disjoint subparts of an Activity.
+ virtual void Wakeup(void* arg) = 0;
// Drop this wakeable without waking up the underlying activity.
- virtual void Drop(WakeupMask wakeup_mask) = 0;
+ virtual void Drop(void* arg) = 0;
// Return the underlying activity debug tag, or "<unknown>" if not available.
- virtual std::string ActivityDebugTag(WakeupMask wakeup_mask) const = 0;
+ virtual std::string ActivityDebugTag(void* arg) const = 0;
protected:
inline ~Wakeable() {}
@@ -68,9 +63,9 @@
namespace promise_detail {
struct Unwakeable final : public Wakeable {
- void Wakeup(WakeupMask) override {}
- void Drop(WakeupMask) override {}
- std::string ActivityDebugTag(WakeupMask) const override;
+ void Wakeup(void*) override {}
+ void Drop(void*) override {}
+ std::string ActivityDebugTag(void*) const override;
};
static Unwakeable* unwakeable() {
return NoDestructSingleton<Unwakeable>::Get();
@@ -81,9 +76,8 @@
// This type is non-copyable but movable.
class Waker {
public:
- Waker(Wakeable* wakeable, WakeupMask wakeup_mask)
- : wakeable_and_arg_{wakeable, wakeup_mask} {}
- Waker() : Waker(promise_detail::unwakeable(), 0) {}
+ Waker(Wakeable* wakeable, void* arg) : wakeable_and_arg_{wakeable, arg} {}
+ Waker() : Waker(promise_detail::unwakeable(), nullptr) {}
~Waker() { wakeable_and_arg_.Drop(); }
Waker(const Waker&) = delete;
Waker& operator=(const Waker&) = delete;
@@ -99,7 +93,7 @@
template <typename H>
friend H AbslHashValue(H h, const Waker& w) {
return H::combine(H::combine(std::move(h), w.wakeable_and_arg_.wakeable),
- w.wakeable_and_arg_.wakeup_mask);
+ w.wakeable_and_arg_.arg);
}
bool operator==(const Waker& other) const noexcept {
@@ -122,42 +116,27 @@
private:
struct WakeableAndArg {
Wakeable* wakeable;
- WakeupMask wakeup_mask;
+ void* arg;
- void Wakeup() { wakeable->Wakeup(wakeup_mask); }
- void Drop() { wakeable->Drop(wakeup_mask); }
+ void Wakeup() { wakeable->Wakeup(arg); }
+ void Drop() { wakeable->Drop(arg); }
std::string ActivityDebugTag() const {
return wakeable == nullptr ? "<unknown>"
- : wakeable->ActivityDebugTag(wakeup_mask);
+ : wakeable->ActivityDebugTag(arg);
}
bool operator==(const WakeableAndArg& other) const noexcept {
- return wakeable == other.wakeable && wakeup_mask == other.wakeup_mask;
+ return wakeable == other.wakeable && arg == other.arg;
}
};
WakeableAndArg Take() {
- return std::exchange(wakeable_and_arg_, {promise_detail::unwakeable(), 0});
+ return std::exchange(wakeable_and_arg_,
+ {promise_detail::unwakeable(), nullptr});
}
WakeableAndArg wakeable_and_arg_;
};
-// Helper type to track wakeups between objects in the same activity.
-// Can be fairly fast as no ref counting or locking needs to occur.
-class IntraActivityWaiter {
- public:
- // Register for wakeup, return Pending(). If state is not ready to proceed,
- // Promises should bottom out here.
- Pending pending();
- // Wake the activity
- void Wake();
-
- std::string DebugString() const;
-
- private:
- WakeupMask wakeups_ = 0;
-};
-
// An Activity tracks execution of a single promise.
// It executes the promise under a mutex.
// When the promise stalls, it registers the containing activity to be woken up
@@ -177,13 +156,7 @@
void ForceWakeup() { MakeOwningWaker().Wakeup(); }
// Force the current activity to immediately repoll if it doesn't complete.
- virtual void ForceImmediateRepoll(WakeupMask mask) = 0;
- // Legacy version of ForceImmediateRepoll() that uses the current participant.
- // Will go away once Party gets merged with Activity. New usage is banned.
- void ForceImmediateRepoll() { ForceImmediateRepoll(CurrentParticipant()); }
-
- // Return the current part of the activity as a bitmask
- virtual WakeupMask CurrentParticipant() const { return 1; }
+ virtual void ForceImmediateRepoll() = 0;
// Return the current activity.
// Additionally:
@@ -311,7 +284,7 @@
public:
Waker MakeOwningWaker() final {
Ref();
- return Waker(this, 0);
+ return Waker(this, nullptr);
}
Waker MakeNonOwningWaker() final;
@@ -320,7 +293,7 @@
Unref();
}
- void ForceImmediateRepoll(WakeupMask) final {
+ void ForceImmediateRepoll() final {
mu_.AssertHeld();
SetActionDuringRun(ActionDuringRun::kWakeup);
}
@@ -360,7 +333,7 @@
Mutex* mu() ABSL_LOCK_RETURNED(mu_) { return &mu_; }
- std::string ActivityDebugTag(WakeupMask) const override { return DebugTag(); }
+ std::string ActivityDebugTag(void*) const override { return DebugTag(); }
private:
class Handle;
@@ -494,7 +467,7 @@
// the activity to an external threadpool to run. If the activity is already
// running on this thread, a note is taken of such and the activity is
// repolled if it doesn't complete.
- void Wakeup(WakeupMask) final {
+ void Wakeup(void*) final {
// If there is an active activity, but hey it's us, flag that and we'll loop
// in RunLoop (that's calling from above here!).
if (Activity::is_current()) {
@@ -513,7 +486,7 @@
}
// Drop a wakeup
- void Drop(WakeupMask) final { this->WakeupComplete(); }
+ void Drop(void*) final { this->WakeupComplete(); }
// Notification that we're no longer executing - it's ok to destruct the
// promise.
@@ -620,16 +593,6 @@
std::move(on_done), std::forward<Contexts>(contexts)...));
}
-inline Pending IntraActivityWaiter::pending() {
- wakeups_ |= Activity::current()->CurrentParticipant();
- return Pending();
-}
-
-inline void IntraActivityWaiter::Wake() {
- if (wakeups_ == 0) return;
- Activity::current()->ForceImmediateRepoll(std::exchange(wakeups_, 0));
-}
-
} // namespace grpc_core
#endif // GRPC_SRC_CORE_LIB_PROMISE_ACTIVITY_H
diff --git a/src/core/lib/promise/context.h b/src/core/lib/promise/context.h
index 46da324..d730407 100644
--- a/src/core/lib/promise/context.h
+++ b/src/core/lib/promise/context.h
@@ -33,20 +33,10 @@
namespace promise_detail {
-struct KeepExistingIfPresent {};
-
template <typename T>
class Context : public ContextType<T> {
public:
explicit Context(T* p) : old_(current_) { current_ = p; }
- // HACKY, try to remove.
- // If a context is present, then don't override it during context
- // initialization.
- // Currently used to keep BatchBuilder across multiple ops in Call StartBatch,
- // but we should be able to drop this once we have promise based transports.
- Context(KeepExistingIfPresent, T* p) : old_(current_) {
- if (current_ == nullptr) current_ = p;
- }
~Context() { current_ = old_; }
Context(const Context&) = delete;
Context& operator=(const Context&) = delete;
diff --git a/src/core/lib/promise/detail/promise_factory.h b/src/core/lib/promise/detail/promise_factory.h
index adca5af..12b291a 100644
--- a/src/core/lib/promise/detail/promise_factory.h
+++ b/src/core/lib/promise/detail/promise_factory.h
@@ -17,7 +17,6 @@
#include <grpc/support/port_platform.h>
-#include <memory>
#include <type_traits>
#include <utility>
@@ -107,9 +106,6 @@
private:
GPR_NO_UNIQUE_ADDRESS F f_;
GPR_NO_UNIQUE_ADDRESS Arg arg_;
-#ifndef NDEBUG
- std::unique_ptr<int> asan_canary_ = std::make_unique<int>(0);
-#endif
};
// Promote a callable(A) -> T | Poll<T> to a PromiseFactory(A) -> Promise<T> by
diff --git a/src/core/lib/promise/if.h b/src/core/lib/promise/if.h
index 22956a2..7413971 100644
--- a/src/core/lib/promise/if.h
+++ b/src/core/lib/promise/if.h
@@ -17,9 +17,7 @@
#include <grpc/support/port_platform.h>
-#include <memory>
#include <type_traits>
-#include <utility>
#include "absl/status/statusor.h"
#include "absl/types/variant.h"
@@ -164,9 +162,6 @@
}
Poll<Result> operator()() {
-#ifndef NDEBUG
- asan_canary_ = std::make_unique<int>(1 + *asan_canary_);
-#endif
if (condition_) {
return if_true_();
} else {
@@ -180,10 +175,6 @@
TruePromise if_true_;
FalsePromise if_false_;
};
- // Make failure to destruct show up in ASAN builds.
-#ifndef NDEBUG
- std::unique_ptr<int> asan_canary_ = std::make_unique<int>(0);
-#endif
};
} // namespace promise_detail
diff --git a/src/core/lib/promise/interceptor_list.h b/src/core/lib/promise/interceptor_list.h
index 1e460b9..546b46d 100644
--- a/src/core/lib/promise/interceptor_list.h
+++ b/src/core/lib/promise/interceptor_list.h
@@ -89,10 +89,6 @@
public:
RunPromise(size_t memory_required, Map* factory, absl::optional<T> value) {
if (!value.has_value() || factory == nullptr) {
- if (grpc_trace_promise_primitives.enabled()) {
- gpr_log(GPR_DEBUG,
- "InterceptorList::RunPromise[%p]: create immediate", this);
- }
is_immediately_resolved_ = true;
Construct(&result_, std::move(value));
} else {
@@ -100,18 +96,10 @@
Construct(&async_resolution_, memory_required);
factory->MakePromise(std::move(*value), async_resolution_.space.get());
async_resolution_.current_factory = factory;
- if (grpc_trace_promise_primitives.enabled()) {
- gpr_log(GPR_DEBUG,
- "InterceptorList::RunPromise[%p]: create async; mem=%p", this,
- async_resolution_.space.get());
- }
}
}
~RunPromise() {
- if (grpc_trace_promise_primitives.enabled()) {
- gpr_log(GPR_DEBUG, "InterceptorList::RunPromise[%p]: destroy", this);
- }
if (is_immediately_resolved_) {
Destruct(&result_);
} else {
@@ -128,10 +116,6 @@
RunPromise(RunPromise&& other) noexcept
: is_immediately_resolved_(other.is_immediately_resolved_) {
- if (grpc_trace_promise_primitives.enabled()) {
- gpr_log(GPR_DEBUG, "InterceptorList::RunPromise[%p]: move from %p",
- this, &other);
- }
if (is_immediately_resolved_) {
Construct(&result_, std::move(other.result_));
} else {
@@ -143,7 +127,7 @@
Poll<absl::optional<T>> operator()() {
if (grpc_trace_promise_primitives.enabled()) {
- gpr_log(GPR_DEBUG, "InterceptorList::RunPromise[%p]: %s", this,
+ gpr_log(GPR_DEBUG, "InterceptorList::RunPromise: %s",
DebugString().c_str());
}
if (is_immediately_resolved_) return std::move(result_);
@@ -155,12 +139,7 @@
async_resolution_.space.get());
async_resolution_.current_factory =
async_resolution_.current_factory->next();
- if (!p->has_value()) async_resolution_.current_factory = nullptr;
- if (grpc_trace_promise_primitives.enabled()) {
- gpr_log(GPR_DEBUG, "InterceptorList::RunPromise[%p]: %s", this,
- DebugString().c_str());
- }
- if (async_resolution_.current_factory == nullptr) {
+ if (async_resolution_.current_factory == nullptr || !p->has_value()) {
return std::move(*p);
}
async_resolution_.current_factory->MakePromise(
diff --git a/src/core/lib/promise/intra_activity_waiter.h b/src/core/lib/promise/intra_activity_waiter.h
new file mode 100644
index 0000000..736ec04
--- /dev/null
+++ b/src/core/lib/promise/intra_activity_waiter.h
@@ -0,0 +1,55 @@
+// Copyright 2021 gRPC authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef GRPC_SRC_CORE_LIB_PROMISE_INTRA_ACTIVITY_WAITER_H
+#define GRPC_SRC_CORE_LIB_PROMISE_INTRA_ACTIVITY_WAITER_H
+
+#include <grpc/support/port_platform.h>
+
+#include <string>
+
+#include "src/core/lib/promise/activity.h"
+#include "src/core/lib/promise/poll.h"
+
+namespace grpc_core {
+
+// Helper type to track wakeups between objects in the same activity.
+// Can be fairly fast as no ref counting or locking needs to occur.
+class IntraActivityWaiter {
+ public:
+ // Register for wakeup, return Pending(). If state is not ready to proceed,
+ // Promises should bottom out here.
+ Pending pending() {
+ waiting_ = true;
+ return Pending();
+ }
+ // Wake the activity
+ void Wake() {
+ if (waiting_) {
+ waiting_ = false;
+ Activity::current()->ForceImmediateRepoll();
+ }
+ }
+
+ std::string DebugString() const {
+ return waiting_ ? "WAITING" : "NOT_WAITING";
+ }
+
+ private:
+ bool waiting_ = false;
+};
+
+} // namespace grpc_core
+
+#endif // GRPC_SRC_CORE_LIB_PROMISE_INTRA_ACTIVITY_WAITER_H
diff --git a/src/core/lib/promise/latch.h b/src/core/lib/promise/latch.h
index 9d33fe7..305cf53 100644
--- a/src/core/lib/promise/latch.h
+++ b/src/core/lib/promise/latch.h
@@ -19,7 +19,6 @@
#include <stdint.h>
-#include <atomic>
#include <string>
#include <type_traits>
#include <utility>
@@ -30,6 +29,7 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/promise/activity.h"
+#include "src/core/lib/promise/intra_activity_waiter.h"
#include "src/core/lib/promise/poll.h"
#include "src/core/lib/promise/trace.h"
@@ -61,14 +61,13 @@
}
// Produce a promise to wait for a value from this latch.
- // Moves the result out of the latch.
auto Wait() {
#ifndef NDEBUG
has_had_waiters_ = true;
#endif
return [this]() -> Poll<T> {
if (grpc_trace_promise_primitives.enabled()) {
- gpr_log(GPR_INFO, "%sWait %s", DebugTag().c_str(),
+ gpr_log(GPR_INFO, "%sPollWait %s", DebugTag().c_str(),
StateString().c_str());
}
if (has_value_) {
@@ -79,25 +78,6 @@
};
}
- // Produce a promise to wait for a value from this latch.
- // Copies the result out of the latch.
- auto WaitAndCopy() {
-#ifndef NDEBUG
- has_had_waiters_ = true;
-#endif
- return [this]() -> Poll<T> {
- if (grpc_trace_promise_primitives.enabled()) {
- gpr_log(GPR_INFO, "%sWaitAndCopy %s", DebugTag().c_str(),
- StateString().c_str());
- }
- if (has_value_) {
- return value_;
- } else {
- return waiter_.pending();
- }
- };
- }
-
// Set the value of the latch. Can only be called once.
void Set(T value) {
if (grpc_trace_promise_primitives.enabled()) {
@@ -109,8 +89,6 @@
waiter_.Wake();
}
- bool is_set() const { return has_value_; }
-
private:
std::string DebugTag() {
return absl::StrCat(Activity::current()->DebugTag(), " LATCH[0x",
@@ -187,7 +165,7 @@
private:
std::string DebugTag() {
- return absl::StrCat(Activity::current()->DebugTag(), " LATCH(void)[0x",
+ return absl::StrCat(Activity::current()->DebugTag(), " LATCH[0x",
reinterpret_cast<uintptr_t>(this), "]: ");
}
@@ -205,70 +183,6 @@
IntraActivityWaiter waiter_;
};
-// A Latch that can have its value observed by outside threads, but only waited
-// upon from inside a single activity.
-template <typename T>
-class ExternallyObservableLatch;
-
-template <>
-class ExternallyObservableLatch<void> {
- public:
- ExternallyObservableLatch() = default;
- ExternallyObservableLatch(const ExternallyObservableLatch&) = delete;
- ExternallyObservableLatch& operator=(const ExternallyObservableLatch&) =
- delete;
-
- // Produce a promise to wait for this latch.
- auto Wait() {
- return [this]() -> Poll<Empty> {
- if (grpc_trace_promise_primitives.enabled()) {
- gpr_log(GPR_INFO, "%sPollWait %s", DebugTag().c_str(),
- StateString().c_str());
- }
- if (IsSet()) {
- return Empty{};
- } else {
- return waiter_.pending();
- }
- };
- }
-
- // Set the latch.
- void Set() {
- if (grpc_trace_promise_primitives.enabled()) {
- gpr_log(GPR_INFO, "%sSet %s", DebugTag().c_str(), StateString().c_str());
- }
- is_set_.store(true, std::memory_order_relaxed);
- waiter_.Wake();
- }
-
- bool IsSet() const { return is_set_.load(std::memory_order_relaxed); }
-
- void Reset() {
- if (grpc_trace_promise_primitives.enabled()) {
- gpr_log(GPR_INFO, "%sReset %s", DebugTag().c_str(),
- StateString().c_str());
- }
- is_set_.store(false, std::memory_order_relaxed);
- }
-
- private:
- std::string DebugTag() {
- return absl::StrCat(Activity::current()->DebugTag(), " LATCH(void)[0x",
- reinterpret_cast<uintptr_t>(this), "]: ");
- }
-
- std::string StateString() {
- return absl::StrCat(
- "is_set:", is_set_.load(std::memory_order_relaxed) ? "true" : "false",
- " waiter:", waiter_.DebugString());
- }
-
- // True if we have a value set, false otherwise.
- std::atomic<bool> is_set_{false};
- IntraActivityWaiter waiter_;
-};
-
template <typename T>
using LatchWaitPromise = decltype(std::declval<Latch<T>>().Wait());
diff --git a/src/core/lib/promise/loop.h b/src/core/lib/promise/loop.h
index 0833865..f0b3f71 100644
--- a/src/core/lib/promise/loop.h
+++ b/src/core/lib/promise/loop.h
@@ -17,13 +17,14 @@
#include <grpc/support/port_platform.h>
+#include <new>
#include <type_traits>
+#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/variant.h"
-#include "src/core/lib/gprpp/construct_destruct.h"
#include "src/core/lib/promise/detail/promise_factory.h"
#include "src/core/lib/promise/poll.h"
@@ -82,21 +83,17 @@
public:
using Result = typename LoopTraits<PromiseResult>::Result;
- explicit Loop(F f) : factory_(std::move(f)) {}
- ~Loop() {
- if (started_) Destruct(&promise_);
- }
+ explicit Loop(F f) : factory_(std::move(f)), promise_(factory_.Make()) {}
+ ~Loop() { promise_.~PromiseType(); }
- Loop(Loop&& loop) noexcept : factory_(std::move(loop.factory_)) {}
+ Loop(Loop&& loop) noexcept
+ : factory_(std::move(loop.factory_)),
+ promise_(std::move(loop.promise_)) {}
Loop(const Loop& loop) = delete;
Loop& operator=(const Loop& loop) = delete;
Poll<Result> operator()() {
- if (!started_) {
- started_ = true;
- Construct(&promise_, factory_.Make());
- }
while (true) {
// Poll the inner promise.
auto promise_result = promise_();
@@ -106,8 +103,8 @@
// from our factory.
auto lc = LoopTraits<PromiseResult>::ToLoopCtl(*p);
if (absl::holds_alternative<Continue>(lc)) {
- Destruct(&promise_);
- Construct(&promise_, factory_.Make());
+ promise_.~PromiseType();
+ new (&promise_) PromiseType(factory_.Make());
continue;
}
// - otherwise there's our result... return it out.
@@ -124,7 +121,6 @@
GPR_NO_UNIQUE_ADDRESS union {
GPR_NO_UNIQUE_ADDRESS PromiseType promise_;
};
- bool started_ = false;
};
} // namespace promise_detail
diff --git a/src/core/lib/promise/map.h b/src/core/lib/promise/map.h
index 44e19bb..a3088d9 100644
--- a/src/core/lib/promise/map.h
+++ b/src/core/lib/promise/map.h
@@ -39,13 +39,6 @@
Map(Promise promise, Fn fn)
: promise_(std::move(promise)), fn_(std::move(fn)) {}
- Map(const Map&) = delete;
- Map& operator=(const Map&) = delete;
- // NOLINTNEXTLINE(performance-noexcept-move-constructor): clang6 bug
- Map(Map&& other) = default;
- // NOLINTNEXTLINE(performance-noexcept-move-constructor): clang6 bug
- Map& operator=(Map&& other) = default;
-
using PromiseResult = typename PromiseLike<Promise>::Result;
using Result =
RemoveCVRef<decltype(std::declval<Fn>()(std::declval<PromiseResult>()))>;
diff --git a/src/core/lib/promise/observable.h b/src/core/lib/promise/observable.h
new file mode 100644
index 0000000..3138d90
--- /dev/null
+++ b/src/core/lib/promise/observable.h
@@ -0,0 +1,295 @@
+// Copyright 2021 gRPC authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef GRPC_SRC_CORE_LIB_PROMISE_OBSERVABLE_H
+#define GRPC_SRC_CORE_LIB_PROMISE_OBSERVABLE_H
+
+#include <grpc/support/port_platform.h>
+
+#include <stdint.h>
+
+#include <limits>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/thread_annotations.h"
+#include "absl/types/optional.h"
+
+#include "src/core/lib/gprpp/sync.h"
+#include "src/core/lib/promise/activity.h"
+#include "src/core/lib/promise/detail/promise_like.h"
+#include "src/core/lib/promise/poll.h"
+#include "src/core/lib/promise/wait_set.h"
+
+namespace grpc_core {
+
+namespace promise_detail {
+
+using ObservableVersion = uint64_t;
+static constexpr ObservableVersion kTombstoneVersion =
+ std::numeric_limits<ObservableVersion>::max();
+
+} // namespace promise_detail
+
+class WatchCommitter {
+ public:
+ void Commit() { version_seen_ = promise_detail::kTombstoneVersion; }
+
+ protected:
+ promise_detail::ObservableVersion version_seen_ = 0;
+};
+
+namespace promise_detail {
+
+// Shared state between Observable and Observer.
+template <typename T>
+class ObservableState {
+ public:
+ explicit ObservableState(absl::optional<T> value)
+ : value_(std::move(value)) {}
+
+ // Publish that we're closed.
+ void Close() {
+ mu_.Lock();
+ version_ = kTombstoneVersion;
+ value_.reset();
+ auto wakeup = waiters_.TakeWakeupSet();
+ mu_.Unlock();
+ wakeup.Wakeup();
+ }
+
+ // Synchronously publish a new value, and wake any waiters.
+ void Push(T value) {
+ mu_.Lock();
+ version_++;
+ value_ = std::move(value);
+ auto wakeup = waiters_.TakeWakeupSet();
+ mu_.Unlock();
+ wakeup.Wakeup();
+ }
+
+ Poll<absl::optional<T>> PollGet(ObservableVersion* version_seen) {
+ MutexLock lock(&mu_);
+ if (!Started()) return Pending();
+ *version_seen = version_;
+ return value_;
+ }
+
+ Poll<absl::optional<T>> PollNext(ObservableVersion* version_seen) {
+ MutexLock lock(&mu_);
+ if (!NextValueReady(version_seen)) return Pending();
+ return value_;
+ }
+
+ Poll<absl::optional<T>> PollWatch(ObservableVersion* version_seen) {
+ if (*version_seen == kTombstoneVersion) return Pending();
+
+ MutexLock lock(&mu_);
+ if (!NextValueReady(version_seen)) return Pending();
+ // Watch needs to be woken up if the value changes even if it's ready now.
+ waiters_.AddPending(Activity::current()->MakeNonOwningWaker());
+ return value_;
+ }
+
+ private:
+ // Returns true if an initial value is set.
+ // If one is not set, add ourselves as pending to waiters_, and return false.
+ bool Started() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+ if (!value_.has_value()) {
+ if (version_ != kTombstoneVersion) {
+ // We allow initial no-value, which does not indicate closure.
+ waiters_.AddPending(Activity::current()->MakeNonOwningWaker());
+ return false;
+ }
+ }
+ return true;
+ }
+
+ // If no value is ready, add ourselves as pending to waiters_ and return
+ // false.
+ // If the next value is ready, update the last version seen and return true.
+ bool NextValueReady(ObservableVersion* version_seen)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+ if (!Started()) return false;
+ if (version_ == *version_seen) {
+ waiters_.AddPending(Activity::current()->MakeNonOwningWaker());
+ return false;
+ }
+ *version_seen = version_;
+ return true;
+ }
+
+ Mutex mu_;
+ WaitSet waiters_ ABSL_GUARDED_BY(mu_);
+ ObservableVersion version_ ABSL_GUARDED_BY(mu_) = 1;
+ absl::optional<T> value_ ABSL_GUARDED_BY(mu_);
+};
+
+// Promise implementation for Observer::Get.
+template <typename T>
+class ObservableGet {
+ public:
+ ObservableGet(ObservableVersion* version_seen, ObservableState<T>* state)
+ : version_seen_(version_seen), state_(state) {}
+
+ Poll<absl::optional<T>> operator()() {
+ return state_->PollGet(version_seen_);
+ }
+
+ private:
+ ObservableVersion* version_seen_;
+ ObservableState<T>* state_;
+};
+
+// Promise implementation for Observer::Next.
+template <typename T>
+class ObservableNext {
+ public:
+ ObservableNext(ObservableVersion* version_seen, ObservableState<T>* state)
+ : version_seen_(version_seen), state_(state) {}
+
+ Poll<absl::optional<T>> operator()() {
+ return state_->PollNext(version_seen_);
+ }
+
+ private:
+ ObservableVersion* version_seen_;
+ ObservableState<T>* state_;
+};
+
+template <typename T, typename F>
+class ObservableWatch final : private WatchCommitter {
+ private:
+ using Promise = PromiseLike<decltype(std::declval<F>()(
+ std::declval<T>(), std::declval<WatchCommitter*>()))>;
+ using Result = typename Promise::Result;
+
+ public:
+ explicit ObservableWatch(F factory, std::shared_ptr<ObservableState<T>> state)
+ : state_(std::move(state)), factory_(std::move(factory)) {}
+ ObservableWatch(const ObservableWatch&) = delete;
+ ObservableWatch& operator=(const ObservableWatch&) = delete;
+ ObservableWatch(ObservableWatch&& other) noexcept
+ : state_(std::move(other.state_)),
+ promise_(std::move(other.promise_)),
+ factory_(std::move(other.factory_)) {}
+ ObservableWatch& operator=(ObservableWatch&&) noexcept = default;
+
+ Poll<Result> operator()() {
+ auto r = state_->PollWatch(&version_seen_);
+ if (auto* p = r.value_if_ready()) {
+ if (p->has_value()) {
+ promise_ = Promise(factory_(std::move(**p), this));
+ } else {
+ promise_ = {};
+ }
+ }
+ if (promise_.has_value()) {
+ return (*promise_)();
+ } else {
+ return Pending();
+ }
+ }
+
+ private:
+ std::shared_ptr<ObservableState<T>> state_;
+ absl::optional<Promise> promise_;
+ F factory_;
+};
+
+} // namespace promise_detail
+
+template <typename T>
+class Observable;
+
+// Observer watches an Observable for updates.
+// It can see either the latest value or wait for a new value, but is not
+// guaranteed to see every value pushed to the Observable.
+template <typename T>
+class Observer {
+ public:
+ Observer(const Observer&) = delete;
+ Observer& operator=(const Observer&) = delete;
+ Observer(Observer&& other) noexcept
+ : version_seen_(other.version_seen_), state_(std::move(other.state_)) {}
+ Observer& operator=(Observer&& other) noexcept {
+ version_seen_ = other.version_seen_;
+ state_ = std::move(other.state_);
+ return *this;
+ }
+
+ // Return a promise that will produce an optional<T>.
+ // If the Observable is still present, this will be a value T, but if the
+ // Observable has been closed, this will be nullopt. Borrows data from the
+ // Observer, so this value must stay valid until the promise is resolved. Only
+ // one Next or Get call is allowed to be outstanding at a time.
+ promise_detail::ObservableGet<T> Get() {
+ return promise_detail::ObservableGet<T>{&version_seen_, &*state_};
+ }
+
+ // Return a promise that will produce the next unseen value as an optional<T>.
+ // If the Observable is still present, this will be a value T, but if the
+ // Observable has been closed, this will be nullopt. Borrows data from the
+ // Observer, so this value must stay valid until the promise is resolved. Only
+ // one Next or Get call is allowed to be outstanding at a time.
+ promise_detail::ObservableNext<T> Next() {
+ return promise_detail::ObservableNext<T>{&version_seen_, &*state_};
+ }
+
+ private:
+ using State = promise_detail::ObservableState<T>;
+ friend class Observable<T>;
+ explicit Observer(std::shared_ptr<State> state) : state_(state) {}
+ promise_detail::ObservableVersion version_seen_ = 0;
+ std::shared_ptr<State> state_;
+};
+
+// Observable models a single writer multiple reader broadcast channel.
+// Readers can observe the latest value, or await a new latest value, but they
+// are not guaranteed to observe every value.
+template <typename T>
+class Observable {
+ public:
+ Observable() : state_(std::make_shared<State>(absl::nullopt)) {}
+ explicit Observable(T value)
+ : state_(std::make_shared<State>(std::move(value))) {}
+ ~Observable() { state_->Close(); }
+ Observable(const Observable&) = delete;
+ Observable& operator=(const Observable&) = delete;
+
+ // Push a new value into the observable.
+ void Push(T value) { state_->Push(std::move(value)); }
+
+ // Create a new Observer - which can pull the current state from this
+ // Observable.
+ Observer<T> MakeObserver() { return Observer<T>(state_); }
+
+ // Create a new Watch - a promise that pushes state into the passed in promise
+ // factory. The promise factory takes two parameters - the current value and a
+ // commit token. If the commit token is used (the Commit function on it is
+ // called), then no further Watch updates are provided.
+ template <typename F>
+ promise_detail::ObservableWatch<T, F> Watch(F f) {
+ return promise_detail::ObservableWatch<T, F>(std::move(f), state_);
+ }
+
+ private:
+ using State = promise_detail::ObservableState<T>;
+ std::shared_ptr<State> state_;
+};
+
+} // namespace grpc_core
+
+#endif // GRPC_SRC_CORE_LIB_PROMISE_OBSERVABLE_H
diff --git a/src/core/lib/promise/party.cc b/src/core/lib/promise/party.cc
index 5384d9e..98c9ea2 100644
--- a/src/core/lib/promise/party.cc
+++ b/src/core/lib/promise/party.cc
@@ -21,6 +21,8 @@
#include <algorithm>
#include <atomic>
#include <initializer_list>
+#include <memory>
+#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
@@ -35,13 +37,6 @@
#include "src/core/lib/promise/activity.h"
#include "src/core/lib/promise/trace.h"
-// #define GRPC_PARTY_MAXIMIZE_THREADS
-
-#ifdef GRPC_PARTY_MAXIMIZE_THREADS
-#include "src/core/lib/gprpp/thd.h" // IWYU pragma: keep
-#include "src/core/lib/iomgr/exec_ctx.h" // IWYU pragma: keep
-#endif
-
namespace grpc_core {
// Weak handle to a Party.
@@ -64,7 +59,7 @@
// Activity needs to wake up (if it still exists!) - wake it up, and drop the
// ref that was kept for this handle.
- void Wakeup(WakeupMask wakeup_mask) override ABSL_LOCKS_EXCLUDED(mu_) {
+ void Wakeup(void* arg) override ABSL_LOCKS_EXCLUDED(mu_) {
mu_.Lock();
// Note that activity refcount can drop to zero, but we could win the lock
// against DropActivity, so we need to only increase activities refcount if
@@ -74,7 +69,7 @@
mu_.Unlock();
// Activity still exists and we have a reference: wake it up, which will
// drop the ref.
- party->Wakeup(wakeup_mask);
+ party->Wakeup(reinterpret_cast<void*>(arg));
} else {
// Could not get the activity - it's either gone or going. No need to wake
// it up!
@@ -84,9 +79,9 @@
Unref();
}
- void Drop(WakeupMask) override { Unref(); }
+ void Drop(void*) override { Unref(); }
- std::string ActivityDebugTag(WakeupMask) const override {
+ std::string ActivityDebugTag(void*) const override {
MutexLock lock(&mu_);
return party_ == nullptr ? "<unknown>" : party_->DebugTag();
}
@@ -121,17 +116,15 @@
}
}
-Party::~Party() {}
-
-void Party::IncrementRefCount(DebugLocation whence) {
- auto prev_state = state_.fetch_add(kOneRef, std::memory_order_relaxed);
- if (grpc_trace_promise_primitives.enabled()) {
- gpr_log(GPR_DEBUG, "%s[party] Ref: prev_state=%s from %s:%d",
- DebugTag().c_str(), StateToString(prev_state).c_str(),
- whence.file(), whence.line());
- }
+Party::~Party() {
+ participants_.clear();
+ arena_->Destroy();
}
+void Party::Orphan() { Unref(); }
+
+void Party::Ref() { state_.fetch_add(kOneRef, std::memory_order_relaxed); }
+
bool Party::RefIfNonZero() {
auto count = state_.load(std::memory_order_relaxed);
do {
@@ -147,120 +140,56 @@
return true;
}
-void Party::Unref(DebugLocation whence) {
- uint64_t prev_state;
- auto do_unref = [&prev_state, this]() {
- prev_state = state_.fetch_sub(kOneRef, std::memory_order_acq_rel);
- };
- if (grpc_trace_promise_primitives.enabled()) {
- auto debug_tag = DebugTag();
- do_unref();
- gpr_log(GPR_DEBUG, "%s[party] Unref: prev_state=%s from %s:%d",
- debug_tag.c_str(), StateToString(prev_state).c_str(), whence.file(),
- whence.line());
- } else {
- do_unref();
+void Party::Unref() {
+ auto prev = state_.fetch_sub(kOneRef, std::memory_order_acq_rel);
+ if (prev == kOneRef) {
+ delete this;
}
- if ((prev_state & kRefMask) == kOneRef) {
- prev_state =
- state_.fetch_or(kDestroying | kLocked, std::memory_order_acq_rel);
- if (prev_state & kLocked) {
- // Already locked: RunParty will call PartyOver.
- } else {
- ScopedActivity activity(this);
- PartyOver();
- }
- return;
- }
+ GPR_DEBUG_ASSERT((prev & kRefMask) != 0);
}
-void Party::CancelRemainingParticipants() {
- ScopedActivity activity(this);
- promise_detail::Context<Arena> arena_ctx(arena_);
- for (size_t i = 0; i < kMaxParticipants; i++) {
- if (auto* p =
- participants_[i].exchange(nullptr, std::memory_order_acquire)) {
- p->Destroy();
- }
- }
-}
-
-std::string Party::ActivityDebugTag(WakeupMask wakeup_mask) const {
- return absl::StrFormat("%s [parts:%x]", DebugTag(), wakeup_mask);
+std::string Party::ActivityDebugTag(void* arg) const {
+ return absl::StrFormat("%s/%p", DebugTag(), arg);
}
Waker Party::MakeOwningWaker() {
GPR_DEBUG_ASSERT(currently_polling_ != kNotPolling);
- IncrementRefCount();
- return Waker(this, 1u << currently_polling_);
+ Ref();
+ return Waker(this, reinterpret_cast<void*>(currently_polling_));
}
Waker Party::MakeNonOwningWaker() {
GPR_DEBUG_ASSERT(currently_polling_ != kNotPolling);
- return Waker(participants_[currently_polling_]
- .load(std::memory_order_relaxed)
- ->MakeNonOwningWakeable(this),
- 1u << currently_polling_);
+ return Waker(participants_[currently_polling_]->MakeNonOwningWakeable(this),
+ reinterpret_cast<void*>(currently_polling_));
}
-void Party::ForceImmediateRepoll(WakeupMask mask) {
- GPR_DEBUG_ASSERT(is_current());
+void Party::ForceImmediateRepoll() {
+ GPR_DEBUG_ASSERT(currently_polling_ != kNotPolling);
// Or in the bit for the currently polling participant.
// Will be grabbed next round to force a repoll of this promise.
- auto prev_state =
- state_.fetch_or(mask & kWakeupMask, std::memory_order_relaxed);
-
- if (grpc_trace_promise_primitives.enabled()) {
- std::vector<int> wakeups;
- for (size_t i = 0; i < 8 * sizeof(WakeupMask); i++) {
- if (mask & (1 << i)) wakeups.push_back(i);
- }
- gpr_log(GPR_DEBUG, "%s[party] ForceImmediateRepoll({%s}): prev_state=%s",
- DebugTag().c_str(), absl::StrJoin(wakeups, ",").c_str(),
- StateToString(prev_state).c_str());
- }
+ state_.fetch_or(1 << currently_polling_, std::memory_order_relaxed);
}
-void Party::RunLocked() {
- auto body = [this]() {
- if (RunParty()) {
- ScopedActivity activity(this);
- PartyOver();
- }
- };
-#ifdef GRPC_PARTY_MAXIMIZE_THREADS
- Thread thd(
- "RunParty",
- [body]() {
- ApplicationCallbackExecCtx app_exec_ctx;
- ExecCtx exec_ctx;
- body();
- },
- nullptr, Thread::Options().set_joinable(false));
- thd.Start();
-#else
- body();
-#endif
-}
-
-bool Party::RunParty() {
+void Party::Run() {
ScopedActivity activity(this);
- promise_detail::Context<Arena> arena_ctx(arena_);
uint64_t prev_state;
do {
// Grab the current state, and clear the wakeup bits & add flag.
- prev_state = state_.fetch_and(kRefMask | kLocked | kAllocatedMask,
- std::memory_order_acquire);
+ prev_state =
+ state_.fetch_and(kRefMask | kLocked, std::memory_order_acquire);
if (grpc_trace_promise_primitives.enabled()) {
- gpr_log(GPR_DEBUG, "%s[party] Run prev_state=%s", DebugTag().c_str(),
+ gpr_log(GPR_DEBUG, "Party::Run(): prev_state=%s",
StateToString(prev_state).c_str());
}
- GPR_ASSERT(prev_state & kLocked);
- if (prev_state & kDestroying) return true;
// From the previous state, extract which participants we're to wakeup.
uint64_t wakeups = prev_state & kWakeupMask;
+ // If there were adds pending, drain them.
+ // We pass in wakeups here so that the new participants are polled
+ // immediately (draining will situate them).
+ if (prev_state & kAddsPending) DrainAdds(wakeups);
// Now update prev_state to be what we want the CAS to see below.
- prev_state &= kRefMask | kLocked | kAllocatedMask;
+ prev_state &= kRefMask | kLocked;
// For each wakeup bit...
for (size_t i = 0; wakeups != 0; i++, wakeups >>= 1) {
// If the bit is not set, skip.
@@ -268,35 +197,10 @@
// If the participant is null, skip.
// This allows participants to complete whilst wakers still exist
// somewhere.
- auto* participant = participants_[i].load(std::memory_order_acquire);
- if (participant == nullptr) {
- if (grpc_trace_promise_primitives.enabled()) {
- gpr_log(GPR_DEBUG, "%s[party] wakeup %" PRIdPTR " already complete",
- DebugTag().c_str(), i);
- }
- continue;
- }
- absl::string_view name;
- if (grpc_trace_promise_primitives.enabled()) {
- name = participant->name();
- gpr_log(GPR_DEBUG, "%s[%s] begin job %" PRIdPTR, DebugTag().c_str(),
- std::string(name).c_str(), i);
- }
+ if (participants_[i] == nullptr) continue;
// Poll the participant.
currently_polling_ = i;
- if (participant->Poll()) {
- if (!name.empty()) {
- gpr_log(GPR_DEBUG, "%s[%s] end poll and finish job %" PRIdPTR,
- DebugTag().c_str(), std::string(name).c_str(), i);
- }
- participants_[i] = nullptr;
- const uint64_t allocated_bit = (1u << i << kAllocatedShift);
- prev_state &= ~allocated_bit;
- state_.fetch_and(~allocated_bit, std::memory_order_release);
- } else if (!name.empty()) {
- gpr_log(GPR_DEBUG, "%s[%s] end poll", DebugTag().c_str(),
- std::string(name).c_str());
- }
+ if (participants_[i]->Poll()) participants_[i].reset();
currently_polling_ = kNotPolling;
}
// Try to CAS the state we expected to have (with no wakeups or adds)
@@ -310,100 +214,106 @@
// TODO(ctiller): consider mitigations for the accidental wakeup on owning
// waker creation case -- I currently expect this will be more expensive
// than this quick loop.
- } while (!state_.compare_exchange_weak(
- prev_state, (prev_state & (kRefMask | kAllocatedMask)),
- std::memory_order_acq_rel, std::memory_order_acquire));
- return false;
+ } while (!state_.compare_exchange_weak(prev_state, (prev_state & kRefMask),
+ std::memory_order_acq_rel,
+ std::memory_order_acquire));
}
-void Party::AddParticipant(Participant* participant) {
- uint64_t state = state_.load(std::memory_order_acquire);
- uint64_t allocated;
-
- int slot;
-
- // Find slots for each new participant, ordering them from lowest available
- // slot upwards to ensure the same poll ordering as presentation ordering to
- // this function.
- do {
- slot = -1;
- allocated = (state & kAllocatedMask) >> kAllocatedShift;
- for (size_t bit = 0; bit < kMaxParticipants; bit++) {
- if (allocated & (1 << bit)) continue;
- slot = bit;
- allocated |= 1 << bit;
- break;
- }
- GPR_ASSERT(slot != -1);
- } while (!state_.compare_exchange_weak(
- state, state | (allocated << kAllocatedShift), std::memory_order_acq_rel,
- std::memory_order_acquire));
-
- if (grpc_trace_promise_primitives.enabled()) {
- gpr_log(GPR_DEBUG, "%s[party] Welcome %s@%d", DebugTag().c_str(),
- std::string(participant->name()).c_str(), slot);
+void Party::DrainAdds(uint64_t& wakeups) {
+ // Grab the list of adds.
+ AddingParticipant* adding =
+ adding_.exchange(nullptr, std::memory_order_acquire);
+ // For each add, situate it and add it to the wakeup mask.
+ while (adding != nullptr) {
+ wakeups |= 1 << SituateNewParticipant(std::move(adding->participant));
+ // Don't leak the add request.
+ delete std::exchange(adding, adding->next);
}
-
- // We've allocated the slot, next we need to populate it.
- // Once we do so however a spurious wakeup could occur, and that wakeup might
- // release the last ref.
- // We need to hold one here.
- auto ref = Ref();
- participants_[slot].store(participant, std::memory_order_release);
-
- // Now we need to wake up the party.
- state = state_.fetch_or((1 << slot) | kLocked, std::memory_order_relaxed);
-
- // If the party was already locked, we're done.
- if ((state & kLocked) != 0) return;
-
- // Otherwise, we need to run the party.
- RunLocked();
}
-void Party::ScheduleWakeup(WakeupMask mask) {
- // Or in the wakeup bit for the participant, AND the locked bit.
- uint64_t prev_state = state_.fetch_or((mask & kWakeupMask) | kLocked,
- std::memory_order_acquire);
+void Party::AddParticipant(Arena::PoolPtr<Participant> participant) {
+ // Lock
+ auto prev_state = state_.fetch_or(kLocked, std::memory_order_acquire);
if (grpc_trace_promise_primitives.enabled()) {
- std::vector<int> wakeups;
- for (size_t i = 0; i < 8 * sizeof(WakeupMask); i++) {
- if (mask & (1 << i)) wakeups.push_back(i);
- }
- gpr_log(GPR_DEBUG, "%s[party] ScheduleWakeup({%s}): prev_state=%s",
- DebugTag().c_str(), absl::StrJoin(wakeups, ",").c_str(),
+ gpr_log(GPR_DEBUG, "Party::AddParticipant(): prev_state=%s",
StateToString(prev_state).c_str());
}
- // If the lock was not held now we hold it, so we need to run.
- if ((prev_state & kLocked) == 0) RunLocked();
+ if ((prev_state & kLocked) == 0) {
+ // Lock acquired
+ state_.fetch_or(1 << SituateNewParticipant(std::move(participant)),
+ std::memory_order_relaxed);
+ Run();
+ return;
+ }
+ // Already locked: add to the list of things to add
+ auto* add = new AddingParticipant{std::move(participant), nullptr};
+ while (!adding_.compare_exchange_weak(
+ add->next, add, std::memory_order_acq_rel, std::memory_order_acquire)) {
+ }
+ // And signal that there are adds waiting.
+ // This needs to happen after the add above: Run() will examine this bit
+ // first, and then decide to drain the queue - so if the ordering was reversed
+ // it might examine the adds pending bit, and then observe no add to drain.
+ prev_state =
+ state_.fetch_or(kLocked | kAddsPending, std::memory_order_release);
+ if (grpc_trace_promise_primitives.enabled()) {
+ gpr_log(GPR_DEBUG, "Party::AddParticipant(): prev_state=%s",
+ StateToString(prev_state).c_str());
+ }
+ if ((prev_state & kLocked) == 0) {
+ // We queued the add but the lock was released before we signalled that.
+ // We acquired the lock though, so now we can run.
+ Run();
+ }
}
-void Party::Wakeup(WakeupMask wakeup_mask) {
- ScheduleWakeup(wakeup_mask);
+size_t Party::SituateNewParticipant(Arena::PoolPtr<Participant> participant) {
+ // First search for a free index in the participants array.
+ // If we find one, use it.
+ for (size_t i = 0; i < participants_.size(); i++) {
+ if (participants_[i] != nullptr) continue;
+ participants_[i] = std::move(participant);
+ return i;
+ }
+
+ // Otherwise, add it to the end.
+ GPR_ASSERT(participants_.size() < kMaxParticipants);
+ participants_.emplace_back(std::move(participant));
+ return participants_.size() - 1;
+}
+
+void Party::ScheduleWakeup(uint64_t participant_index) {
+ // Or in the wakeup bit for the participant, AND the locked bit.
+ uint64_t prev_state = state_.fetch_or((1 << participant_index) | kLocked,
+ std::memory_order_acquire);
+ if (grpc_trace_promise_primitives.enabled()) {
+ gpr_log(GPR_DEBUG, "Party::ScheduleWakeup(%" PRIu64 "): prev_state=%s",
+ participant_index, StateToString(prev_state).c_str());
+ }
+ // If the lock was not held now we hold it, so we need to run.
+ if ((prev_state & kLocked) == 0) Run();
+}
+
+void Party::Wakeup(void* arg) {
+ ScheduleWakeup(reinterpret_cast<uintptr_t>(arg));
Unref();
}
-void Party::Drop(WakeupMask) { Unref(); }
+void Party::Drop(void*) { Unref(); }
std::string Party::StateToString(uint64_t state) {
std::vector<std::string> parts;
if (state & kLocked) parts.push_back("locked");
- if (state & kDestroying) parts.push_back("over");
+ if (state & kAddsPending) parts.push_back("adds_pending");
parts.push_back(
absl::StrFormat("refs=%" PRIuPTR, (state & kRefMask) >> kRefShift));
- std::vector<int> allocated;
std::vector<int> participants;
for (size_t i = 0; i < kMaxParticipants; i++) {
- if ((state & (1ull << i)) != 0) participants.push_back(i);
- if ((state & (1ull << (i + kAllocatedShift))) != 0) allocated.push_back(i);
- }
- if (!allocated.empty()) {
- parts.push_back(
- absl::StrFormat("allocated={%s}", absl::StrJoin(allocated, ",")));
+ if ((state & (1 << i)) != 0) participants.push_back(i);
}
if (!participants.empty()) {
parts.push_back(
- absl::StrFormat("wakeup={%s}", absl::StrJoin(participants, ",")));
+ absl::StrFormat("wakeup=%s", absl::StrJoin(participants, ",")));
}
return absl::StrCat("{", absl::StrJoin(parts, " "), "}");
}
diff --git a/src/core/lib/promise/party.h b/src/core/lib/promise/party.h
index dcf10ff..3032d05 100644
--- a/src/core/lib/promise/party.h
+++ b/src/core/lib/promise/party.h
@@ -24,17 +24,9 @@
#include <string>
#include <utility>
-#include "absl/strings/string_view.h"
+#include "absl/container/inlined_vector.h"
-#include <grpc/support/log.h>
-
-#include "src/core/lib/gprpp/construct_destruct.h"
-#include "src/core/lib/gprpp/crash.h"
-#include "src/core/lib/gprpp/debug_location.h"
-#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/promise/activity.h"
-#include "src/core/lib/promise/context.h"
-#include "src/core/lib/promise/detail/promise_factory.h"
#include "src/core/lib/resource_quota/arena.h"
namespace grpc_core {
@@ -42,43 +34,29 @@
// A Party is an Activity with multiple participant promises.
class Party : public Activity, private Wakeable {
public:
+ explicit Party(Arena* arena) : arena_(arena) {}
+
Party(const Party&) = delete;
Party& operator=(const Party&) = delete;
- // Spawn one promise into the party.
+ // Spawn one promise onto the arena.
// The promise will be polled until it is resolved, or until the party is shut
// down.
// The on_complete callback will be called with the result of the promise if
// it completes.
// A maximum of sixteen promises can be spawned onto a party.
- template <typename Factory, typename OnComplete>
- void Spawn(absl::string_view name, Factory promise_factory,
- OnComplete on_complete);
+ template <typename Promise, typename OnComplete>
+ void Spawn(Promise promise, OnComplete on_complete);
- void Orphan() final { Crash("unused"); }
+ void Orphan() final;
// Activity implementation: not allowed to be overridden by derived types.
- void ForceImmediateRepoll(WakeupMask mask) final;
- WakeupMask CurrentParticipant() const final {
- GPR_DEBUG_ASSERT(currently_polling_ != kNotPolling);
- return 1u << currently_polling_;
- }
+ void ForceImmediateRepoll() final;
Waker MakeOwningWaker() final;
Waker MakeNonOwningWaker() final;
- std::string ActivityDebugTag(WakeupMask wakeup_mask) const final;
-
- void IncrementRefCount(DebugLocation whence = {});
- void Unref(DebugLocation whence = {});
- RefCountedPtr<Party> Ref() {
- IncrementRefCount();
- return RefCountedPtr<Party>(this);
- }
-
- Arena* arena() const { return arena_; }
+ std::string ActivityDebugTag(void* arg) const final;
protected:
- explicit Party(Arena* arena, size_t initial_refs)
- : state_(kOneRef * initial_refs), arena_(arena) {}
~Party() override;
// Main run loop. Must be locked.
@@ -86,17 +64,9 @@
// be done.
// Derived types will likely want to override this to set up their
// contexts before polling.
- // Should not be called by derived types except as a tail call to the base
- // class RunParty when overriding this method to add custom context.
- // Returns true if the party is over.
- virtual bool RunParty() GRPC_MUST_USE_RESULT;
+ virtual void Run();
- bool RefIfNonZero();
-
- // Destroy any remaining participants.
- // Should be called by derived types in response to PartyOver.
- // Needs to have normal context setup before calling.
- void CancelRemainingParticipants();
+ Arena* arena() const { return arena_; }
private:
// Non-owning wakeup handle.
@@ -105,91 +75,67 @@
// One participant in the party.
class Participant {
public:
- explicit Participant(absl::string_view name) : name_(name) {}
+ virtual ~Participant();
// Poll the participant. Return true if complete.
- // Participant should take care of its own deallocation in this case.
virtual bool Poll() = 0;
- // Destroy the participant before finishing.
- virtual void Destroy() = 0;
-
// Return a Handle instance for this participant.
Wakeable* MakeNonOwningWakeable(Party* party);
- absl::string_view name() const { return name_; }
-
- protected:
- ~Participant();
-
private:
Handle* handle_ = nullptr;
- absl::string_view name_;
};
// Concrete implementation of a participant for some promise & oncomplete
// type.
- template <typename SuppliedFactory, typename OnComplete>
+ template <typename Promise, typename OnComplete>
class ParticipantImpl final : public Participant {
- using Factory = promise_detail::OncePromiseFactory<void, SuppliedFactory>;
- using Promise = typename Factory::Promise;
-
public:
- ParticipantImpl(absl::string_view name, SuppliedFactory promise_factory,
- OnComplete on_complete)
- : Participant(name), on_complete_(std::move(on_complete)) {
- Construct(&factory_, std::move(promise_factory));
- }
- ~ParticipantImpl() {
- if (!started_) {
- Destruct(&factory_);
- } else {
- Destruct(&promise_);
- }
- }
+ ParticipantImpl(Promise promise, OnComplete on_complete)
+ : promise_(std::move(promise)), on_complete_(std::move(on_complete)) {}
bool Poll() override {
- if (!started_) {
- auto p = factory_.Make();
- Destruct(&factory_);
- Construct(&promise_, std::move(p));
- started_ = true;
- }
auto p = promise_();
if (auto* r = p.value_if_ready()) {
on_complete_(std::move(*r));
- GetContext<Arena>()->DeletePooled(this);
return true;
}
return false;
}
- void Destroy() override { GetContext<Arena>()->DeletePooled(this); }
-
private:
- union {
- GPR_NO_UNIQUE_ADDRESS Factory factory_;
- GPR_NO_UNIQUE_ADDRESS Promise promise_;
- };
+ GPR_NO_UNIQUE_ADDRESS Promise promise_;
GPR_NO_UNIQUE_ADDRESS OnComplete on_complete_;
- bool started_ = false;
};
- // Notification that the party has finished and this instance can be deleted.
- // Derived types should arrange to call CancelRemainingParticipants during
- // this sequence.
- virtual void PartyOver() = 0;
-
- // Run the locked part of the party until it is unlocked.
- void RunLocked();
+ // One participant that's been spawned, but has not yet made it into
+ // participants_.
+ // Since it's impossible to block on locking this type, we form a queue of
+ // participants waiting and drain that prior to polling.
+ struct AddingParticipant {
+ Arena::PoolPtr<Participant> participant;
+ AddingParticipant* next;
+ };
// Wakeable implementation
- void Wakeup(WakeupMask wakeup_mask) final;
- void Drop(WakeupMask wakeup_mask) final;
+ void Wakeup(void* arg) final;
+ void Drop(void* arg) final;
- // Organize to wake up some participants.
- void ScheduleWakeup(WakeupMask mask);
- // Add a participant (backs Spawn, after type erasure to ParticipantFactory).
- void AddParticipant(Participant* participant);
+ // Internal ref counting
+ void Ref();
+ bool RefIfNonZero();
+ void Unref();
+
+ // Organize to wake up one participant.
+ void ScheduleWakeup(uint64_t participant_index);
+ // Start adding a participant to the party.
+ // Backs Spawn() after type erasure.
+ void AddParticipant(Arena::PoolPtr<Participant> participant);
+ // Drain the add queue.
+ void DrainAdds(uint64_t& wakeups);
+ // Take a new participant, and add it to the participants_ array.
+ // Returns the index of the participant in the array.
+ size_t SituateNewParticipant(Arena::PoolPtr<Participant> new_participant);
// Convert a state into a string.
static std::string StateToString(uint64_t state);
@@ -212,41 +158,34 @@
// clang-format off
// Bits used to store 16 bits of wakeups
- static constexpr uint64_t kWakeupMask = 0x0000'0000'0000'ffff;
- // Bits used to store 16 bits of allocated participant slots.
- static constexpr uint64_t kAllocatedMask = 0x0000'0000'ffff'0000;
- // Bit indicating destruction has begun (refs went to zero)
- static constexpr uint64_t kDestroying = 0x0000'0001'0000'0000;
+ static constexpr uint64_t kWakeupMask = 0x0000'0000'0000'ffff;
// Bit indicating locked or not
- static constexpr uint64_t kLocked = 0x0000'0008'0000'0000;
+ static constexpr uint64_t kLocked = 0x0000'0000'0100'0000;
+ // Bit indicating whether there are adds pending
+ static constexpr uint64_t kAddsPending = 0x0000'0000'1000'0000;
// Bits used to store 24 bits of ref counts
- static constexpr uint64_t kRefMask = 0xffff'ff00'0000'0000;
+ static constexpr uint64_t kRefMask = 0xffff'ff00'0000'0000;
// clang-format on
- // Shift to get from a participant mask to an allocated mask.
- static constexpr size_t kAllocatedShift = 16;
+ // Number of bits reserved for wakeups gives us the maximum number of
+ // participants.
+ static constexpr size_t kMaxParticipants = 16;
// How far to shift to get the refcount
static constexpr size_t kRefShift = 40;
// One ref count
static constexpr uint64_t kOneRef = 1ull << kRefShift;
- // Number of bits reserved for wakeups gives us the maximum number of
- // participants.
- static constexpr size_t kMaxParticipants = 16;
- std::atomic<uint64_t> state_;
Arena* const arena_;
+ absl::InlinedVector<Arena::PoolPtr<Participant>, 1> participants_;
+ std::atomic<uint64_t> state_{kOneRef};
+ std::atomic<AddingParticipant*> adding_{nullptr};
uint8_t currently_polling_ = kNotPolling;
- // All current participants, using a tagged format.
- // If the lower bit is unset, then this is a Participant*.
- // If the lower bit is set, then this is a ParticipantFactory*.
- std::atomic<Participant*> participants_[kMaxParticipants] = {};
};
-template <typename Factory, typename OnComplete>
-void Party::Spawn(absl::string_view name, Factory promise_factory,
- OnComplete on_complete) {
- AddParticipant(arena_->NewPooled<ParticipantImpl<Factory, OnComplete>>(
- name, std::move(promise_factory), std::move(on_complete)));
+template <typename Promise, typename OnComplete>
+void Party::Spawn(Promise promise, OnComplete on_complete) {
+ AddParticipant(arena_->MakePooled<ParticipantImpl<Promise, OnComplete>>(
+ std::move(promise), std::move(on_complete)));
}
} // namespace grpc_core
diff --git a/src/core/lib/promise/pipe.h b/src/core/lib/promise/pipe.h
index e989b5c..c3ec5ad 100644
--- a/src/core/lib/promise/pipe.h
+++ b/src/core/lib/promise/pipe.h
@@ -25,6 +25,7 @@
#include <type_traits>
#include <utility>
+#include "absl/base/attributes.h"
#include "absl/strings/str_cat.h"
#include "absl/types/optional.h"
#include "absl/types/variant.h"
@@ -38,6 +39,7 @@
#include "src/core/lib/promise/context.h"
#include "src/core/lib/promise/if.h"
#include "src/core/lib/promise/interceptor_list.h"
+#include "src/core/lib/promise/intra_activity_waiter.h"
#include "src/core/lib/promise/map.h"
#include "src/core/lib/promise/poll.h"
#include "src/core/lib/promise/seq.h"
@@ -158,11 +160,9 @@
case ValueState::kClosed:
case ValueState::kReadyClosed:
case ValueState::kCancelled:
- case ValueState::kWaitingForAckAndClosed:
return false;
case ValueState::kReady:
case ValueState::kAcked:
- case ValueState::kWaitingForAck:
return on_empty_.pending();
case ValueState::kEmpty:
value_state_ = ValueState::kReady;
@@ -180,14 +180,11 @@
GPR_DEBUG_ASSERT(refs_ != 0);
switch (value_state_) {
case ValueState::kClosed:
- return true;
+ case ValueState::kReadyClosed:
case ValueState::kCancelled:
return false;
case ValueState::kReady:
- case ValueState::kReadyClosed:
case ValueState::kEmpty:
- case ValueState::kWaitingForAck:
- case ValueState::kWaitingForAckAndClosed:
return on_empty_.pending();
case ValueState::kAcked:
value_state_ = ValueState::kEmpty;
@@ -209,14 +206,12 @@
switch (value_state_) {
case ValueState::kEmpty:
case ValueState::kAcked:
- case ValueState::kWaitingForAck:
- case ValueState::kWaitingForAckAndClosed:
return on_full_.pending();
case ValueState::kReadyClosed:
- value_state_ = ValueState::kWaitingForAckAndClosed;
- return std::move(value_);
+ this->ResetInterceptorList();
+ value_state_ = ValueState::kClosed;
+ ABSL_FALLTHROUGH_INTENDED;
case ValueState::kReady:
- value_state_ = ValueState::kWaitingForAck;
return std::move(value_);
case ValueState::kClosed:
case ValueState::kCancelled:
@@ -225,89 +220,18 @@
GPR_UNREACHABLE_CODE(return absl::nullopt);
}
- // Check if the pipe is closed for sending (if there is a value still queued
- // but the pipe is closed, reports closed).
- Poll<bool> PollClosedForSender() {
- if (grpc_trace_promise_primitives.enabled()) {
- gpr_log(GPR_INFO, "%s", DebugOpString("PollClosedForSender").c_str());
- }
- GPR_DEBUG_ASSERT(refs_ != 0);
- switch (value_state_) {
- case ValueState::kEmpty:
- case ValueState::kAcked:
- case ValueState::kReady:
- case ValueState::kWaitingForAck:
- return on_closed_.pending();
- case ValueState::kWaitingForAckAndClosed:
- case ValueState::kReadyClosed:
- case ValueState::kClosed:
- return false;
- case ValueState::kCancelled:
- return true;
- }
- GPR_UNREACHABLE_CODE(return true);
- }
-
- // Check if the pipe is closed for receiving (if there is a value still queued
- // but the pipe is closed, reports open).
- Poll<bool> PollClosedForReceiver() {
- if (grpc_trace_promise_primitives.enabled()) {
- gpr_log(GPR_INFO, "%s", DebugOpString("PollClosedForReceiver").c_str());
- }
- GPR_DEBUG_ASSERT(refs_ != 0);
- switch (value_state_) {
- case ValueState::kEmpty:
- case ValueState::kAcked:
- case ValueState::kReady:
- case ValueState::kReadyClosed:
- case ValueState::kWaitingForAck:
- case ValueState::kWaitingForAckAndClosed:
- return on_closed_.pending();
- case ValueState::kClosed:
- return false;
- case ValueState::kCancelled:
- return true;
- }
- GPR_UNREACHABLE_CODE(return true);
- }
-
- Poll<Empty> PollEmpty() {
- if (grpc_trace_promise_primitives.enabled()) {
- gpr_log(GPR_INFO, "%s", DebugOpString("PollEmpty").c_str());
- }
- GPR_DEBUG_ASSERT(refs_ != 0);
- switch (value_state_) {
- case ValueState::kReady:
- case ValueState::kReadyClosed:
- return on_empty_.pending();
- case ValueState::kWaitingForAck:
- case ValueState::kWaitingForAckAndClosed:
- case ValueState::kAcked:
- case ValueState::kEmpty:
- case ValueState::kClosed:
- case ValueState::kCancelled:
- return Empty{};
- }
- GPR_UNREACHABLE_CODE(return Empty{});
- }
-
void AckNext() {
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_INFO, "%s", DebugOpString("AckNext").c_str());
}
switch (value_state_) {
case ValueState::kReady:
- case ValueState::kWaitingForAck:
value_state_ = ValueState::kAcked;
on_empty_.Wake();
break;
case ValueState::kReadyClosed:
- case ValueState::kWaitingForAckAndClosed:
this->ResetInterceptorList();
value_state_ = ValueState::kClosed;
- on_closed_.Wake();
- on_empty_.Wake();
- on_full_.Wake();
break;
case ValueState::kClosed:
case ValueState::kCancelled:
@@ -327,22 +251,14 @@
case ValueState::kAcked:
this->ResetInterceptorList();
value_state_ = ValueState::kClosed;
- on_empty_.Wake();
on_full_.Wake();
- on_closed_.Wake();
break;
case ValueState::kReady:
value_state_ = ValueState::kReadyClosed;
- on_closed_.Wake();
- break;
- case ValueState::kWaitingForAck:
- value_state_ = ValueState::kWaitingForAckAndClosed;
- on_closed_.Wake();
break;
case ValueState::kReadyClosed:
case ValueState::kClosed:
case ValueState::kCancelled:
- case ValueState::kWaitingForAckAndClosed:
break;
}
}
@@ -356,15 +272,13 @@
case ValueState::kAcked:
case ValueState::kReady:
case ValueState::kReadyClosed:
- case ValueState::kWaitingForAck:
- case ValueState::kWaitingForAckAndClosed:
this->ResetInterceptorList();
value_state_ = ValueState::kCancelled;
- on_empty_.Wake();
on_full_.Wake();
- on_closed_.Wake();
break;
case ValueState::kClosed:
+ value_state_ = ValueState::kCancelled;
+ break;
case ValueState::kCancelled:
break;
}
@@ -391,8 +305,6 @@
kEmpty,
// Value has been pushed but not acked, it's possible to receive.
kReady,
- // Value has been read and not acked, both send/receive blocked until ack.
- kWaitingForAck,
// Value has been received and acked, we can unblock senders and transition
// to empty.
kAcked,
@@ -401,9 +313,6 @@
// Pipe is closed successfully, no more values can be sent
// (but one value is queued and ready to be received)
kReadyClosed,
- // Pipe is closed successfully, no more values can be sent
- // (but one value is queued and waiting to be acked)
- kWaitingForAckAndClosed,
// Pipe is closed unsuccessfully, no more values can be sent
kCancelled,
};
@@ -412,8 +321,7 @@
return absl::StrCat(DebugTag(), op, " refs=", refs_,
" value_state=", ValueStateName(value_state_),
" on_empty=", on_empty_.DebugString().c_str(),
- " on_full=", on_full_.DebugString().c_str(),
- " on_closed=", on_closed_.DebugString().c_str());
+ " on_full=", on_full_.DebugString().c_str());
}
static const char* ValueStateName(ValueState state) {
@@ -428,10 +336,6 @@
return "Closed";
case ValueState::kReadyClosed:
return "ReadyClosed";
- case ValueState::kWaitingForAck:
- return "WaitingForAck";
- case ValueState::kWaitingForAckAndClosed:
- return "WaitingForAckAndClosed";
case ValueState::kCancelled:
return "Cancelled";
}
@@ -445,7 +349,6 @@
ValueState value_state_;
IntraActivityWaiter on_empty_;
IntraActivityWaiter on_full_;
- IntraActivityWaiter on_closed_;
// Make failure to destruct show up in ASAN builds.
#ifndef NDEBUG
@@ -485,25 +388,11 @@
// receiver is either closed or able to receive another message.
PushType Push(T value);
- // Return a promise that resolves when the receiver is closed.
- // The resolved value is a bool - true if the pipe was cancelled, false if it
- // was closed successfully.
- // Checks closed from the senders perspective: that is, if there is a value in
- // the pipe but the pipe is closed, reports closed.
- auto AwaitClosed() {
- return [center = center_]() { return center->PollClosedForSender(); };
- }
-
- // Interject PromiseFactory f into the pipeline.
- // f will be called with the current value traversing the pipe, and should
- // return a value to replace it with.
- // Interjects at the Push end of the pipe.
template <typename Fn>
void InterceptAndMap(Fn f, DebugLocation from = {}) {
center_->PrependMap(std::move(f), from);
}
- // Per above, but calls cleanup_fn when the pipe is closed.
template <typename Fn, typename OnHalfClose>
void InterceptAndMap(Fn f, OnHalfClose cleanup_fn, DebugLocation from = {}) {
center_->PrependMapWithCleanup(std::move(f), std::move(cleanup_fn), from);
@@ -520,31 +409,6 @@
#endif
};
-template <typename T>
-class PipeReceiver;
-
-namespace pipe_detail {
-
-// Implementation of PipeReceiver::Next promise.
-template <typename T>
-class Next {
- public:
- Next(const Next&) = delete;
- Next& operator=(const Next&) = delete;
- Next(Next&& other) noexcept = default;
- Next& operator=(Next&& other) noexcept = default;
-
- Poll<absl::optional<T>> operator()() { return center_->Next(); }
-
- private:
- friend class PipeReceiver<T>;
- explicit Next(RefCountedPtr<Center<T>> center) : center_(std::move(center)) {}
-
- RefCountedPtr<Center<T>> center_;
-};
-
-} // namespace pipe_detail
-
// Receive end of a Pipe.
template <typename T>
class PipeReceiver {
@@ -554,7 +418,7 @@
PipeReceiver(PipeReceiver&& other) noexcept = default;
PipeReceiver& operator=(PipeReceiver&& other) noexcept = default;
~PipeReceiver() {
- if (center_ != nullptr) center_->MarkCancelled();
+ if (center_ != nullptr) center_->MarkClosed();
}
void Swap(PipeReceiver<T>* other) { std::swap(center_, other->center_); }
@@ -564,55 +428,13 @@
// message was received, or no value if the other end of the pipe was closed.
// Blocks the promise until the receiver is either closed or a message is
// available.
- auto Next() {
- return Seq(
- pipe_detail::Next<T>(center_->Ref()),
- [center = center_->Ref()](absl::optional<T> value) {
- bool open = value.has_value();
- bool cancelled = center->cancelled();
- return If(
- open,
- [center = std::move(center), value = std::move(value)]() mutable {
- auto run = center->Run(std::move(value));
- return Map(std::move(run),
- [center = std::move(center)](
- absl::optional<T> value) mutable {
- if (value.has_value()) {
- center->value() = std::move(*value);
- return NextResult<T>(std::move(center));
- } else {
- center->MarkCancelled();
- return NextResult<T>(true);
- }
- });
- },
- [cancelled]() { return NextResult<T>(cancelled); });
- });
- }
+ auto Next();
- // Return a promise that resolves when the receiver is closed.
- // The resolved value is a bool - true if the pipe was cancelled, false if it
- // was closed successfully.
- // Checks closed from the receivers perspective: that is, if there is a value
- // in the pipe but the pipe is closed, reports open until that value is read.
- auto AwaitClosed() {
- return [center = center_]() { return center->PollClosedForReceiver(); };
- }
-
- auto AwaitEmpty() {
- return [center = center_]() { return center->PollEmpty(); };
- }
-
- // Interject PromiseFactory f into the pipeline.
- // f will be called with the current value traversing the pipe, and should
- // return a value to replace it with.
- // Interjects at the Next end of the pipe.
template <typename Fn>
void InterceptAndMap(Fn f, DebugLocation from = {}) {
center_->AppendMap(std::move(f), from);
}
- // Per above, but calls cleanup_fn when the pipe is closed.
template <typename Fn, typename OnHalfClose>
void InterceptAndMapWithHalfClose(Fn f, OnHalfClose cleanup_fn,
DebugLocation from = {}) {
@@ -637,19 +459,12 @@
class Push {
public:
Push(const Push&) = delete;
-
Push& operator=(const Push&) = delete;
Push(Push&& other) noexcept = default;
Push& operator=(Push&& other) noexcept = default;
Poll<bool> operator()() {
- if (center_ == nullptr) {
- if (grpc_trace_promise_primitives.enabled()) {
- gpr_log(GPR_DEBUG, "%s Pipe push has a null center",
- Activity::current()->DebugTag().c_str());
- }
- return false;
- }
+ if (center_ == nullptr) return false;
if (auto* p = absl::get_if<T>(&state_)) {
auto r = center_->Push(p);
if (auto* ok = r.value_if_ready()) {
@@ -674,6 +489,24 @@
absl::variant<T, AwaitingAck> state_;
};
+// Implementation of PipeReceiver::Next promise.
+template <typename T>
+class Next {
+ public:
+ Next(const Next&) = delete;
+ Next& operator=(const Next&) = delete;
+ Next(Next&& other) noexcept = default;
+ Next& operator=(Next&& other) noexcept = default;
+
+ Poll<absl::optional<T>> operator()() { return center_->Next(); }
+
+ private:
+ friend class PipeReceiver<T>;
+ explicit Next(RefCountedPtr<Center<T>> center) : center_(std::move(center)) {}
+
+ RefCountedPtr<Center<T>> center_;
+};
+
} // namespace pipe_detail
template <typename T>
@@ -683,6 +516,33 @@
}
template <typename T>
+auto PipeReceiver<T>::Next() {
+ return Seq(
+ pipe_detail::Next<T>(center_->Ref()),
+ [center = center_->Ref()](absl::optional<T> value) {
+ bool open = value.has_value();
+ bool cancelled = center->cancelled();
+ return If(
+ open,
+ [center = std::move(center), value = std::move(value)]() mutable {
+ auto run_interceptors = center->Run(std::move(value));
+ return Map(std::move(run_interceptors),
+ [center = std::move(center)](
+ absl::optional<T> value) mutable {
+ if (value.has_value()) {
+ center->value() = std::move(*value);
+ return NextResult<T>(std::move(center));
+ } else {
+ center->MarkCancelled();
+ return NextResult<T>(true);
+ }
+ });
+ },
+ [cancelled]() { return NextResult<T>(cancelled); });
+ });
+}
+
+template <typename T>
using PipeReceiverNextType = decltype(std::declval<PipeReceiver<T>>().Next());
template <typename T>
diff --git a/src/core/lib/promise/promise.h b/src/core/lib/promise/promise.h
index 5da762f..d5683bd 100644
--- a/src/core/lib/promise/promise.h
+++ b/src/core/lib/promise/promise.h
@@ -17,10 +17,10 @@
#include <grpc/support/port_platform.h>
+#include <functional>
#include <type_traits>
#include <utility>
-#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/types/optional.h"
@@ -33,7 +33,7 @@
// Most of the time we just pass around the functor, but occasionally
// it pays to have a type erased variant, which we define here.
template <typename T>
-using Promise = absl::AnyInvocable<Poll<T>()>;
+using Promise = std::function<Poll<T>()>;
// Helper to execute a promise immediately and return either the result or
// nothing.
diff --git a/src/core/lib/resource_quota/arena.cc b/src/core/lib/resource_quota/arena.cc
index d37ef42..8811f88 100644
--- a/src/core/lib/resource_quota/arena.cc
+++ b/src/core/lib/resource_quota/arena.cc
@@ -54,9 +54,6 @@
gpr_free_aligned(z);
z = prev_z;
}
-#ifdef GRPC_ARENA_TRACE_POOLED_ALLOCATIONS
- gpr_log(GPR_ERROR, "DESTRUCT_ARENA %p", this);
-#endif
}
Arena* Arena::Create(size_t initial_size, MemoryAllocator* memory_allocator) {
@@ -74,7 +71,7 @@
return std::make_pair(new_arena, first_alloc);
}
-void Arena::DestroyManagedNewObjects() {
+void Arena::Destroy() {
ManagedNewObject* p;
// Outer loop: clear the managed new object list.
// We do this repeatedly in case a destructor ends up allocating something.
@@ -85,10 +82,6 @@
Destruct(std::exchange(p, p->next));
}
}
-}
-
-void Arena::Destroy() {
- DestroyManagedNewObjects();
memory_allocator_->Release(total_allocated_.load(std::memory_order_relaxed));
this->~Arena();
gpr_free_aligned(this);
@@ -121,8 +114,7 @@
}
}
-void* Arena::AllocPooled(size_t obj_size, size_t alloc_size,
- std::atomic<FreePoolNode*>* head) {
+void* Arena::AllocPooled(size_t alloc_size, std::atomic<FreePoolNode*>* head) {
// ABA mitigation:
// AllocPooled may be called by multiple threads, and to remove a node from
// the free list we need to manipulate the next pointer, which may be done
@@ -140,11 +132,7 @@
FreePoolNode* p = head->exchange(nullptr, std::memory_order_acquire);
// If there are no nodes in the free list, then go ahead and allocate from the
// arena.
- if (p == nullptr) {
- void* r = Alloc(alloc_size);
- TracePoolAlloc(obj_size, r);
- return r;
- }
+ if (p == nullptr) return Alloc(alloc_size);
// We had a non-empty free list... but we own the *entire* free list.
// We only want one node, so if there are extras we'd better give them back.
if (p->next != nullptr) {
@@ -163,14 +151,10 @@
extra = next;
}
}
- TracePoolAlloc(obj_size, p);
return p;
}
void Arena::FreePooled(void* p, std::atomic<FreePoolNode*>* head) {
- // May spuriously trace a free of an already freed object - see AllocPooled
- // ABA mitigation.
- TracePoolFree(p);
FreePoolNode* node = static_cast<FreePoolNode*>(p);
node->next = head->load(std::memory_order_acquire);
while (!head->compare_exchange_weak(
diff --git a/src/core/lib/resource_quota/arena.h b/src/core/lib/resource_quota/arena.h
index 1dcb530..b4798518 100644
--- a/src/core/lib/resource_quota/arena.h
+++ b/src/core/lib/resource_quota/arena.h
@@ -45,9 +45,6 @@
#include "src/core/lib/promise/context.h"
#include "src/core/lib/resource_quota/memory_quota.h"
-// #define GRPC_ARENA_POOLED_ALLOCATIONS_USE_MALLOC
-// #define GRPC_ARENA_TRACE_POOLED_ALLOCATIONS
-
namespace grpc_core {
namespace arena_detail {
@@ -117,9 +114,7 @@
} // namespace arena_detail
class Arena {
- // Selected pool sizes.
- // How to tune: see tools/codegen/core/optimize_arena_pool_sizes.py
- using PoolSizes = absl::integer_sequence<size_t, 80, 304, 528, 1024>;
+ using PoolSizes = absl::integer_sequence<size_t, 256, 512, 768>;
struct FreePoolNode {
FreePoolNode* next;
};
@@ -135,13 +130,6 @@
size_t initial_size, size_t alloc_size,
MemoryAllocator* memory_allocator);
- // Destroy all `ManagedNew` allocated objects.
- // Allows safe destruction of these objects even if they need context held by
- // the arena.
- // Idempotent.
- // TODO(ctiller): eliminate ManagedNew.
- void DestroyManagedNewObjects();
-
// Destroy an arena.
void Destroy();
@@ -182,7 +170,6 @@
return &p->t;
}
-#ifndef GRPC_ARENA_POOLED_ALLOCATIONS_USE_MALLOC
class PooledDeleter {
public:
explicit PooledDeleter(std::atomic<FreePoolNode*>* free_list)
@@ -222,7 +209,6 @@
&pools_[arena_detail::PoolFromObjectSize<sizeof(T)>(PoolSizes())];
return PoolPtr<T>(
new (AllocPooled(
- sizeof(T),
arena_detail::AllocationSizeFromObjectSize<sizeof(T)>(PoolSizes()),
free_list)) T(std::forward<Args>(args)...),
PooledDeleter(free_list));
@@ -243,95 +229,12 @@
return PoolPtr<T[]>(new (Alloc(where.alloc_size)) T[n],
PooledDeleter(nullptr));
} else {
- return PoolPtr<T[]>(new (AllocPooled(where.alloc_size, where.alloc_size,
- &pools_[where.pool_index])) T[n],
- PooledDeleter(&pools_[where.pool_index]));
+ return PoolPtr<T[]>(
+ new (AllocPooled(where.alloc_size, &pools_[where.pool_index])) T[n],
+ PooledDeleter(&pools_[where.pool_index]));
}
}
- // Like MakePooled, but with manual memory management.
- // The caller is responsible for calling DeletePooled() on the returned
- // pointer, and expected to call it with the same type T as was passed to this
- // function (else the free list returned to the arena will be corrupted).
- template <typename T, typename... Args>
- T* NewPooled(Args&&... args) {
- auto* free_list =
- &pools_[arena_detail::PoolFromObjectSize<sizeof(T)>(PoolSizes())];
- return new (AllocPooled(
- sizeof(T),
- arena_detail::AllocationSizeFromObjectSize<sizeof(T)>(PoolSizes()),
- free_list)) T(std::forward<Args>(args)...);
- }
-
- template <typename T>
- void DeletePooled(T* p) {
- auto* free_list =
- &pools_[arena_detail::PoolFromObjectSize<sizeof(T)>(PoolSizes())];
- p->~T();
- FreePooled(p, free_list);
- }
-#else
- class PooledDeleter {
- public:
- PooledDeleter() = default;
- explicit PooledDeleter(std::nullptr_t) : delete_(false) {}
- template <typename T>
- void operator()(T* p) {
- // TODO(ctiller): promise based filter hijacks ownership of some pointers
- // to make them appear as PoolPtr without really transferring ownership,
- // by setting the arena to nullptr.
- // This is a transitional hack and should be removed once promise based
- // filter is removed.
- if (delete_) delete p;
- }
-
- bool has_freelist() const { return delete_; }
-
- private:
- bool delete_ = true;
- };
-
- template <typename T>
- using PoolPtr = std::unique_ptr<T, PooledDeleter>;
-
- // Make a unique_ptr to T that is allocated from the arena.
- // When the pointer is released, the memory may be reused for other
- // MakePooled(.*) calls.
- // CAUTION: The amount of memory allocated is rounded up to the nearest
- // value in Arena::PoolSizes, and so this may pessimize total
- // arena size.
- template <typename T, typename... Args>
- PoolPtr<T> MakePooled(Args&&... args) {
- return PoolPtr<T>(new T(std::forward<Args>(args)...), PooledDeleter());
- }
-
- // Make a unique_ptr to an array of T that is allocated from the arena.
- // When the pointer is released, the memory may be reused for other
- // MakePooled(.*) calls.
- // One can use MakePooledArray<char> to allocate a buffer of bytes.
- // CAUTION: The amount of memory allocated is rounded up to the nearest
- // value in Arena::PoolSizes, and so this may pessimize total
- // arena size.
- template <typename T>
- PoolPtr<T[]> MakePooledArray(size_t n) {
- return PoolPtr<T[]>(new T[n], PooledDeleter());
- }
-
- // Like MakePooled, but with manual memory management.
- // The caller is responsible for calling DeletePooled() on the returned
- // pointer, and expected to call it with the same type T as was passed to this
- // function (else the free list returned to the arena will be corrupted).
- template <typename T, typename... Args>
- T* NewPooled(Args&&... args) {
- return new T(std::forward<Args>(args)...);
- }
-
- template <typename T>
- void DeletePooled(T* p) {
- delete p;
- }
-#endif
-
private:
struct Zone {
Zone* prev;
@@ -372,24 +275,9 @@
void* AllocZone(size_t size);
- void* AllocPooled(size_t obj_size, size_t alloc_size,
- std::atomic<FreePoolNode*>* head);
+ void* AllocPooled(size_t alloc_size, std::atomic<FreePoolNode*>* head);
static void FreePooled(void* p, std::atomic<FreePoolNode*>* head);
- void TracePoolAlloc(size_t size, void* ptr) {
- (void)size;
- (void)ptr;
-#ifdef GRPC_ARENA_TRACE_POOLED_ALLOCATIONS
- gpr_log(GPR_ERROR, "ARENA %p ALLOC %" PRIdPTR " @ %p", this, size, ptr);
-#endif
- }
- static void TracePoolFree(void* ptr) {
- (void)ptr;
-#ifdef GRPC_ARENA_TRACE_POOLED_ALLOCATIONS
- gpr_log(GPR_ERROR, "FREE %p", ptr);
-#endif
- }
-
// Keep track of the total used size. We use this in our call sizing
// hysteresis.
std::atomic<size_t> total_used_{0};
@@ -402,9 +290,7 @@
// last zone; the zone list is reverse-walked during arena destruction only.
std::atomic<Zone*> last_zone_{nullptr};
std::atomic<ManagedNewObject*> managed_new_head_{nullptr};
-#ifndef GRPC_ARENA_POOLED_ALLOCATIONS_USE_MALLOC
std::atomic<FreePoolNode*> pools_[PoolSizes::size()]{};
-#endif
// The backing memory quota
MemoryAllocator* const memory_allocator_;
};
diff --git a/src/core/lib/security/transport/server_auth_filter.cc b/src/core/lib/security/transport/server_auth_filter.cc
index c67b973..f5938e9 100644
--- a/src/core/lib/security/transport/server_auth_filter.cc
+++ b/src/core/lib/security/transport/server_auth_filter.cc
@@ -18,12 +18,12 @@
#include <grpc/support/port_platform.h>
+#include <string.h>
+
#include <algorithm>
#include <atomic>
-#include <cstddef>
#include <functional>
#include <memory>
-#include <string>
#include <utility>
#include "absl/status/status.h"
@@ -41,7 +41,6 @@
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/channel/context.h"
#include "src/core/lib/channel/promise_based_filter.h"
-#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/debug_location.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/status_helper.h"
@@ -58,7 +57,6 @@
#include "src/core/lib/security/transport/auth_filters.h" // IWYU pragma: keep
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/slice/slice_internal.h"
-#include "src/core/lib/surface/call_trace.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h"
@@ -122,28 +120,12 @@
// memory later
RunApplicationCode(ServerAuthFilter* filter, CallArgs call_args)
: state_(GetContext<Arena>()->ManagedNew<State>(std::move(call_args))) {
- if (grpc_call_trace.enabled()) {
- gpr_log(GPR_ERROR,
- "%s[server-auth]: Delegate to application: filter=%p this=%p "
- "auth_ctx=%p",
- Activity::current()->DebugTag().c_str(), filter, this,
- filter->auth_context_.get());
- }
filter->server_credentials_->auth_metadata_processor().process(
filter->server_credentials_->auth_metadata_processor().state,
filter->auth_context_.get(), state_->md.metadata, state_->md.count,
OnMdProcessingDone, state_);
}
- RunApplicationCode(const RunApplicationCode&) = delete;
- RunApplicationCode& operator=(const RunApplicationCode&) = delete;
- RunApplicationCode(RunApplicationCode&& other) noexcept
- : state_(std::exchange(other.state_, nullptr)) {}
- RunApplicationCode& operator=(RunApplicationCode&& other) noexcept {
- state_ = std::exchange(other.state_, nullptr);
- return *this;
- }
-
Poll<absl::StatusOr<CallArgs>> operator()() {
if (state_->done.load(std::memory_order_acquire)) {
return Poll<absl::StatusOr<CallArgs>>(std::move(state_->call_args));
diff --git a/src/core/lib/slice/slice.cc b/src/core/lib/slice/slice.cc
index 6180ef1..51ee3a8 100644
--- a/src/core/lib/slice/slice.cc
+++ b/src/core/lib/slice/slice.cc
@@ -480,7 +480,7 @@
}
const uint8_t* last = haystack_bytes + haystack_len - needle_len;
- for (const uint8_t* cur = haystack_bytes; cur <= last; ++cur) {
+ for (const uint8_t* cur = haystack_bytes; cur != last; ++cur) {
if (0 == memcmp(cur, needle_bytes, needle_len)) {
return static_cast<int>(cur - haystack_bytes);
}
diff --git a/src/core/lib/surface/call.cc b/src/core/lib/surface/call.cc
index 940a70d..f273077 100644
--- a/src/core/lib/surface/call.cc
+++ b/src/core/lib/surface/call.cc
@@ -36,12 +36,14 @@
#include <vector>
#include "absl/base/thread_annotations.h"
+#include "absl/cleanup/cleanup.h"
#include "absl/meta/type_traits.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
+#include "absl/types/variant.h"
#include <grpc/byte_buffer.h>
#include <grpc/compression.h>
@@ -86,13 +88,8 @@
#include "src/core/lib/promise/arena_promise.h"
#include "src/core/lib/promise/context.h"
#include "src/core/lib/promise/detail/basic_seq.h"
-#include "src/core/lib/promise/latch.h"
-#include "src/core/lib/promise/map.h"
-#include "src/core/lib/promise/party.h"
#include "src/core/lib/promise/pipe.h"
#include "src/core/lib/promise/poll.h"
-#include "src/core/lib/promise/race.h"
-#include "src/core/lib/promise/seq.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/slice/slice_buffer.h"
#include "src/core/lib/slice/slice_internal.h"
@@ -102,7 +99,6 @@
#include "src/core/lib/surface/completion_queue.h"
#include "src/core/lib/surface/server.h"
#include "src/core/lib/surface/validate_metadata.h"
-#include "src/core/lib/transport/batch_builder.h"
#include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h"
@@ -141,13 +137,11 @@
virtual void InternalRef(const char* reason) = 0;
virtual void InternalUnref(const char* reason) = 0;
- grpc_compression_algorithm test_only_compression_algorithm() {
- return incoming_compression_algorithm_;
- }
- uint32_t test_only_message_flags() { return test_only_last_message_flags_; }
- CompressionAlgorithmSet encodings_accepted_by_peer() {
- return encodings_accepted_by_peer_;
- }
+ virtual grpc_compression_algorithm test_only_compression_algorithm() = 0;
+ virtual uint32_t test_only_message_flags() = 0;
+ virtual uint32_t test_only_encodings_accepted_by_peer() = 0;
+ virtual grpc_compression_algorithm compression_for_level(
+ grpc_compression_level level) = 0;
// This should return nullptr for the promise stack (and alternative means
// for that functionality be invented)
@@ -222,26 +216,6 @@
void ClearPeerString() { SetPeerString(Slice(grpc_empty_slice())); }
- // TODO(ctiller): cancel_func is for cancellation of the call - filter stack
- // holds no mutexes here, promise stack does, and so locking is different.
- // Remove this and cancel directly once promise conversion is done.
- void ProcessIncomingInitialMetadata(grpc_metadata_batch& md);
- // Fixup outgoing metadata before sending - adds compression, protects
- // internal headers against external modification.
- void PrepareOutgoingInitialMetadata(const grpc_op& op,
- grpc_metadata_batch& md);
- void NoteLastMessageFlags(uint32_t flags) {
- test_only_last_message_flags_ = flags;
- }
- grpc_compression_algorithm incoming_compression_algorithm() const {
- return incoming_compression_algorithm_;
- }
-
- void HandleCompressionAlgorithmDisabled(
- grpc_compression_algorithm compression_algorithm) GPR_ATTRIBUTE_NOINLINE;
- void HandleCompressionAlgorithmNotAccepted(
- grpc_compression_algorithm compression_algorithm) GPR_ATTRIBUTE_NOINLINE;
-
private:
RefCountedPtr<Channel> channel_;
Arena* const arena_;
@@ -251,18 +225,11 @@
const bool is_client_;
// flag indicating that cancellation is inherited
bool cancellation_is_inherited_ = false;
- // Compression algorithm for *incoming* data
- grpc_compression_algorithm incoming_compression_algorithm_ =
- GRPC_COMPRESS_NONE;
- // Supported encodings (compression algorithms), a bitset.
- // Always support no compression.
- CompressionAlgorithmSet encodings_accepted_by_peer_{GRPC_COMPRESS_NONE};
- uint32_t test_only_last_message_flags_ = 0;
// Peer name is protected by a mutex because it can be accessed by the
// application at the same moment as it is being set by the completion
// of the recv_initial_metadata op. The mutex should be mostly uncontended.
mutable Mutex peer_mu_;
- Slice peer_string_;
+ Slice peer_string_ ABSL_GUARDED_BY(&peer_mu_);
};
Call::ParentCall* Call::GetOrCreateParentCall() {
@@ -357,13 +324,9 @@
void Call::CancelWithStatus(grpc_status_code status, const char* description) {
// copying 'description' is needed to ensure the grpc_call_cancel_with_status
// guarantee that can be short-lived.
- // TODO(ctiller): change to
- // absl::Status(static_cast<absl::StatusCode>(status), description)
- // (ie remove the set_int, set_str).
CancelWithError(grpc_error_set_int(
- grpc_error_set_str(
- absl::Status(static_cast<absl::StatusCode>(status), description),
- StatusStrProperty::kGrpcMessage, description),
+ grpc_error_set_str(GRPC_ERROR_CREATE(description),
+ StatusStrProperty::kGrpcMessage, description),
StatusIntProperty::kRpcStatus, status));
}
@@ -410,92 +373,6 @@
arena->Destroy();
}
-void Call::PrepareOutgoingInitialMetadata(const grpc_op& op,
- grpc_metadata_batch& md) {
- // TODO(juanlishen): If the user has already specified a compression
- // algorithm by setting the initial metadata with key of
- // GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY, we shouldn't override that
- // with the compression algorithm mapped from compression level.
- // process compression level
- grpc_compression_level effective_compression_level = GRPC_COMPRESS_LEVEL_NONE;
- bool level_set = false;
- if (op.data.send_initial_metadata.maybe_compression_level.is_set) {
- effective_compression_level =
- op.data.send_initial_metadata.maybe_compression_level.level;
- level_set = true;
- } else {
- const grpc_compression_options copts = channel()->compression_options();
- if (copts.default_level.is_set) {
- level_set = true;
- effective_compression_level = copts.default_level.level;
- }
- }
- // Currently, only server side supports compression level setting.
- if (level_set && !is_client()) {
- const grpc_compression_algorithm calgo =
- encodings_accepted_by_peer().CompressionAlgorithmForLevel(
- effective_compression_level);
- // The following metadata will be checked and removed by the message
- // compression filter. It will be used as the call's compression
- // algorithm.
- md.Set(GrpcInternalEncodingRequest(), calgo);
- }
- // Ignore any te metadata key value pairs specified.
- md.Remove(TeMetadata());
-}
-
-void Call::ProcessIncomingInitialMetadata(grpc_metadata_batch& md) {
- Slice* peer_string = md.get_pointer(PeerString());
- if (peer_string != nullptr) SetPeerString(peer_string->Ref());
-
- incoming_compression_algorithm_ =
- md.Take(GrpcEncodingMetadata()).value_or(GRPC_COMPRESS_NONE);
- encodings_accepted_by_peer_ =
- md.Take(GrpcAcceptEncodingMetadata())
- .value_or(CompressionAlgorithmSet{GRPC_COMPRESS_NONE});
-
- const grpc_compression_options compression_options =
- channel_->compression_options();
- const grpc_compression_algorithm compression_algorithm =
- incoming_compression_algorithm_;
- if (GPR_UNLIKELY(!CompressionAlgorithmSet::FromUint32(
- compression_options.enabled_algorithms_bitset)
- .IsSet(compression_algorithm))) {
- // check if algorithm is supported by current channel config
- HandleCompressionAlgorithmDisabled(compression_algorithm);
- }
- // GRPC_COMPRESS_NONE is always set.
- GPR_DEBUG_ASSERT(encodings_accepted_by_peer_.IsSet(GRPC_COMPRESS_NONE));
- if (GPR_UNLIKELY(!encodings_accepted_by_peer_.IsSet(compression_algorithm))) {
- if (GRPC_TRACE_FLAG_ENABLED(grpc_compression_trace)) {
- HandleCompressionAlgorithmNotAccepted(compression_algorithm);
- }
- }
-}
-
-void Call::HandleCompressionAlgorithmNotAccepted(
- grpc_compression_algorithm compression_algorithm) {
- const char* algo_name = nullptr;
- grpc_compression_algorithm_name(compression_algorithm, &algo_name);
- gpr_log(GPR_ERROR,
- "Compression algorithm ('%s') not present in the "
- "accepted encodings (%s)",
- algo_name,
- std::string(encodings_accepted_by_peer_.ToString()).c_str());
-}
-
-void Call::HandleCompressionAlgorithmDisabled(
- grpc_compression_algorithm compression_algorithm) {
- const char* algo_name = nullptr;
- grpc_compression_algorithm_name(compression_algorithm, &algo_name);
- std::string error_msg =
- absl::StrFormat("Compression algorithm '%s' is disabled.", algo_name);
- gpr_log(GPR_ERROR, "%s", error_msg.c_str());
- CancelWithError(grpc_error_set_int(absl::UnimplementedError(error_msg),
- StatusIntProperty::kRpcStatus,
- GRPC_STATUS_UNIMPLEMENTED));
-}
-
///////////////////////////////////////////////////////////////////////////////
// FilterStackCall
// To be removed once promise conversion is complete
@@ -554,6 +431,11 @@
return context_[elem].value;
}
+ grpc_compression_algorithm compression_for_level(
+ grpc_compression_level level) override {
+ return encodings_accepted_by_peer_.CompressionAlgorithmForLevel(level);
+ }
+
bool is_trailers_only() const override {
bool result = is_trailers_only_;
GPR_DEBUG_ASSERT(!result || recv_initial_metadata_.TransportSize() == 0);
@@ -571,6 +453,18 @@
return authority_metadata->as_string_view();
}
+ grpc_compression_algorithm test_only_compression_algorithm() override {
+ return incoming_compression_algorithm_;
+ }
+
+ uint32_t test_only_message_flags() override {
+ return test_only_last_message_flags_;
+ }
+
+ uint32_t test_only_encodings_accepted_by_peer() override {
+ return encodings_accepted_by_peer_.ToLegacyBitmask();
+ }
+
static size_t InitialSizeEstimate() {
return sizeof(FilterStackCall) +
sizeof(BatchControl) * kMaxConcurrentBatches;
@@ -671,6 +565,7 @@
void FinishStep(PendingOp op);
void ProcessDataAfterMetadata();
void ReceivingStreamReady(grpc_error_handle error);
+ void ValidateFilteredMetadata();
void ReceivingInitialMetadataReady(grpc_error_handle error);
void ReceivingTrailingMetadataReady(grpc_error_handle error);
void FinishBatch(grpc_error_handle error);
@@ -695,6 +590,10 @@
grpc_closure* start_batch_closure);
void SetFinalStatus(grpc_error_handle error);
BatchControl* ReuseOrAllocateBatchControl(const grpc_op* ops);
+ void HandleCompressionAlgorithmDisabled(
+ grpc_compression_algorithm compression_algorithm) GPR_ATTRIBUTE_NOINLINE;
+ void HandleCompressionAlgorithmNotAccepted(
+ grpc_compression_algorithm compression_algorithm) GPR_ATTRIBUTE_NOINLINE;
bool PrepareApplicationMetadata(size_t count, grpc_metadata* metadata,
bool is_trailing);
void PublishAppMetadata(grpc_metadata_batch* b, bool is_trailing);
@@ -738,6 +637,13 @@
// completed
grpc_call_final_info final_info_;
+ // Compression algorithm for *incoming* data
+ grpc_compression_algorithm incoming_compression_algorithm_ =
+ GRPC_COMPRESS_NONE;
+ // Supported encodings (compression algorithms), a bitset.
+ // Always support no compression.
+ CompressionAlgorithmSet encodings_accepted_by_peer_{GRPC_COMPRESS_NONE};
+
// Contexts for various subsystems (security, tracing, ...).
grpc_call_context_element context_[GRPC_CONTEXT_COUNT] = {};
@@ -751,6 +657,7 @@
grpc_closure receiving_stream_ready_;
grpc_closure receiving_initial_metadata_ready_;
grpc_closure receiving_trailing_metadata_ready_;
+ uint32_t test_only_last_message_flags_ = 0;
// Status about operation of call
bool sent_server_trailing_metadata_ = false;
gpr_atm cancelled_with_error_ = 0;
@@ -1187,7 +1094,11 @@
}
void FilterStackCall::RecvInitialFilter(grpc_metadata_batch* b) {
- ProcessIncomingInitialMetadata(*b);
+ incoming_compression_algorithm_ =
+ b->Take(GrpcEncodingMetadata()).value_or(GRPC_COMPRESS_NONE);
+ encodings_accepted_by_peer_ =
+ b->Take(GrpcAcceptEncodingMetadata())
+ .value_or(CompressionAlgorithmSet{GRPC_COMPRESS_NONE});
PublishAppMetadata(b, false);
}
@@ -1356,11 +1267,11 @@
call->receiving_message_ = false;
FinishStep(PendingOp::kRecvMessage);
} else {
- call->NoteLastMessageFlags(call->receiving_stream_flags_);
+ call->test_only_last_message_flags_ = call->receiving_stream_flags_;
if ((call->receiving_stream_flags_ & GRPC_WRITE_INTERNAL_COMPRESS) &&
- (call->incoming_compression_algorithm() != GRPC_COMPRESS_NONE)) {
+ (call->incoming_compression_algorithm_ != GRPC_COMPRESS_NONE)) {
*call->receiving_buffer_ = grpc_raw_compressed_byte_buffer_create(
- nullptr, 0, call->incoming_compression_algorithm());
+ nullptr, 0, call->incoming_compression_algorithm_);
} else {
*call->receiving_buffer_ = grpc_raw_byte_buffer_create(nullptr, 0);
}
@@ -1401,6 +1312,50 @@
}
}
+void FilterStackCall::HandleCompressionAlgorithmDisabled(
+ grpc_compression_algorithm compression_algorithm) {
+ const char* algo_name = nullptr;
+ grpc_compression_algorithm_name(compression_algorithm, &algo_name);
+ std::string error_msg =
+ absl::StrFormat("Compression algorithm '%s' is disabled.", algo_name);
+ gpr_log(GPR_ERROR, "%s", error_msg.c_str());
+ CancelWithStatus(GRPC_STATUS_UNIMPLEMENTED, error_msg.c_str());
+}
+
+void FilterStackCall::HandleCompressionAlgorithmNotAccepted(
+ grpc_compression_algorithm compression_algorithm) {
+ const char* algo_name = nullptr;
+ grpc_compression_algorithm_name(compression_algorithm, &algo_name);
+ gpr_log(GPR_ERROR,
+ "Compression algorithm ('%s') not present in the "
+ "accepted encodings (%s)",
+ algo_name,
+ std::string(encodings_accepted_by_peer_.ToString()).c_str());
+}
+
+void FilterStackCall::BatchControl::ValidateFilteredMetadata() {
+ FilterStackCall* call = call_;
+
+ const grpc_compression_options compression_options =
+ call->channel()->compression_options();
+ const grpc_compression_algorithm compression_algorithm =
+ call->incoming_compression_algorithm_;
+ if (GPR_UNLIKELY(!CompressionAlgorithmSet::FromUint32(
+ compression_options.enabled_algorithms_bitset)
+ .IsSet(compression_algorithm))) {
+ // check if algorithm is supported by current channel config
+ call->HandleCompressionAlgorithmDisabled(compression_algorithm);
+ }
+ // GRPC_COMPRESS_NONE is always set.
+ GPR_DEBUG_ASSERT(call->encodings_accepted_by_peer_.IsSet(GRPC_COMPRESS_NONE));
+ if (GPR_UNLIKELY(
+ !call->encodings_accepted_by_peer_.IsSet(compression_algorithm))) {
+ if (GRPC_TRACE_FLAG_ENABLED(grpc_compression_trace)) {
+ call->HandleCompressionAlgorithmNotAccepted(compression_algorithm);
+ }
+ }
+}
+
void FilterStackCall::BatchControl::ReceivingInitialMetadataReady(
grpc_error_handle error) {
FilterStackCall* call = call_;
@@ -1411,6 +1366,12 @@
grpc_metadata_batch* md = &call->recv_initial_metadata_;
call->RecvInitialFilter(md);
+ // TODO(ctiller): this could be moved into recv_initial_filter now
+ ValidateFilteredMetadata();
+
+ Slice* peer_string = md->get_pointer(PeerString());
+ if (peer_string != nullptr) call->SetPeerString(peer_string->Ref());
+
absl::optional<Timestamp> deadline = md->get(GrpcTimeoutMetadata());
if (deadline.has_value() && !call->is_client()) {
call_->set_send_deadline(*deadline);
@@ -1560,6 +1521,36 @@
error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
goto done_with_error;
}
+ // TODO(juanlishen): If the user has already specified a compression
+ // algorithm by setting the initial metadata with key of
+ // GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY, we shouldn't override that
+ // with the compression algorithm mapped from compression level.
+ // process compression level
+ grpc_compression_level effective_compression_level =
+ GRPC_COMPRESS_LEVEL_NONE;
+ bool level_set = false;
+ if (op->data.send_initial_metadata.maybe_compression_level.is_set) {
+ effective_compression_level =
+ op->data.send_initial_metadata.maybe_compression_level.level;
+ level_set = true;
+ } else {
+ const grpc_compression_options copts =
+ channel()->compression_options();
+ if (copts.default_level.is_set) {
+ level_set = true;
+ effective_compression_level = copts.default_level.level;
+ }
+ }
+ // Currently, only server side supports compression level setting.
+ if (level_set && !is_client()) {
+ const grpc_compression_algorithm calgo =
+ encodings_accepted_by_peer_.CompressionAlgorithmForLevel(
+ effective_compression_level);
+ // The following metadata will be checked and removed by the message
+ // compression filter. It will be used as the call's compression
+ // algorithm.
+ send_initial_metadata_.Set(GrpcInternalEncodingRequest(), calgo);
+ }
if (op->data.send_initial_metadata.count > INT_MAX) {
error = GRPC_CALL_ERROR_INVALID_METADATA;
goto done_with_error;
@@ -1572,7 +1563,8 @@
error = GRPC_CALL_ERROR_INVALID_METADATA;
goto done_with_error;
}
- PrepareOutgoingInitialMetadata(*op, send_initial_metadata_);
+ // Ignore any te metadata key value pairs specified.
+ send_initial_metadata_.Remove(TeMetadata());
// TODO(ctiller): just make these the same variable?
if (is_client() && send_deadline() != Timestamp::InfFuture()) {
send_initial_metadata_.Set(GrpcTimeoutMetadata(), send_deadline());
@@ -1949,7 +1941,8 @@
// Will be folded into Call once the promise conversion is done
class PromiseBasedCall : public Call,
- public Party,
+ public Activity,
+ public Wakeable,
public grpc_event_engine::experimental::EventEngine::
Closure /* for deadlines */ {
public:
@@ -1960,62 +1953,176 @@
void (*destroy)(void* value)) override;
void* ContextGet(grpc_context_index elem) const override;
void SetCompletionQueue(grpc_completion_queue* cq) override;
- bool Completed() final { return finished_.IsSet(); }
+ void SetCompletionQueueLocked(grpc_completion_queue* cq)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
+ void CancelWithError(absl::Status error) final ABSL_LOCKS_EXCLUDED(mu_) {
+ MutexLock lock(&mu_);
+ CancelWithErrorLocked(std::move(error));
+ }
+ virtual void CancelWithErrorLocked(absl::Status error)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) = 0;
+ bool Completed() final ABSL_LOCKS_EXCLUDED(mu_) {
+ MutexLock lock(&mu_);
+ return completed_;
+ }
+
+ void Orphan() final {
+ MutexLock lock(&mu_);
+ if (!completed_) {
+ CancelWithErrorLocked(absl::CancelledError("Call orphaned"));
+ }
+ }
// Implementation of call refcounting: move this to DualRefCounted once we
// don't need to maintain FilterStackCall compatibility
- void ExternalRef() final { InternalRef("external"); }
- void ExternalUnref() final { InternalUnref("external"); }
- void InternalRef(const char* reason) final {
+ void ExternalRef() final {
+ const uint64_t prev_ref_pair =
+ refs_.fetch_add(MakeRefPair(1, 0), std::memory_order_relaxed);
if (grpc_call_refcount_trace.enabled()) {
- gpr_log(GPR_DEBUG, "INTERNAL_REF:%p:%s", this, reason);
+ gpr_log(GPR_DEBUG, "%s EXTERNAL_REF: %d:%d->%d:%d", DebugTag().c_str(),
+ GetStrongRefs(prev_ref_pair), GetWeakRefs(prev_ref_pair),
+ GetStrongRefs(prev_ref_pair) + 1, GetWeakRefs(prev_ref_pair));
}
- Party::IncrementRefCount();
+ }
+ void ExternalUnref() final {
+ const uint64_t prev_ref_pair =
+ refs_.fetch_add(MakeRefPair(-1, 1), std::memory_order_acq_rel);
+ if (grpc_call_refcount_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "%s EXTERNAL_UNREF: %d:%d->%d:%d", DebugTag().c_str(),
+ GetStrongRefs(prev_ref_pair), GetWeakRefs(prev_ref_pair),
+ GetStrongRefs(prev_ref_pair) - 1, GetWeakRefs(prev_ref_pair) + 1);
+ }
+ const uint32_t strong_refs = GetStrongRefs(prev_ref_pair);
+ if (GPR_UNLIKELY(strong_refs == 1)) {
+ Orphan();
+ }
+ // Now drop the weak ref.
+ InternalUnref("external_ref");
+ }
+ void InternalRef(const char* reason) final {
+ uint64_t n = refs_.fetch_add(MakeRefPair(0, 1), std::memory_order_relaxed);
+ if (grpc_call_refcount_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "%s REF: %s %d:%d->%d:%d", DebugTag().c_str(), reason,
+ GetStrongRefs(n), GetWeakRefs(n), GetStrongRefs(n),
+ GetWeakRefs(n) + 1);
+ }
}
void InternalUnref(const char* reason) final {
+ const uint64_t prev_ref_pair =
+ refs_.fetch_sub(MakeRefPair(0, 1), std::memory_order_acq_rel);
if (grpc_call_refcount_trace.enabled()) {
- gpr_log(GPR_DEBUG, "INTERNAL_UNREF:%p:%s", this, reason);
+ gpr_log(GPR_DEBUG, "%s UNREF: %s %d:%d->%d:%d", DebugTag().c_str(),
+ reason, GetStrongRefs(prev_ref_pair), GetWeakRefs(prev_ref_pair),
+ GetStrongRefs(prev_ref_pair), GetWeakRefs(prev_ref_pair) - 1);
}
- Party::Unref();
+ if (GPR_UNLIKELY(prev_ref_pair == MakeRefPair(0, 1))) {
+ DeleteThis();
+ }
}
+ // Activity methods
+ void ForceImmediateRepoll() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) override;
+ Waker MakeOwningWaker() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) override {
+ InternalRef("wakeup");
+// If ASAN is defined, we leverage it to detect dropped Waker objects.
+// Usually Waker must be destroyed or woken up, but (especially with arenas)
+// it's not uncommon to create a Waker and then do neither. In that case it's
+// incredibly fraught to diagnose where the dropped reference to this object was
+// created. Instead, leverage ASAN and create a new object per expected wakeup.
+// Now when we drop such an object ASAN will fail and we'll get a callstack to
+// the creation of the waker in question.
+#if defined(__has_feature)
+#if __has_feature(address_sanitizer)
+#define GRPC_CALL_USES_ASAN_WAKER
+ class AsanWaker final : public Wakeable {
+ public:
+ explicit AsanWaker(PromiseBasedCall* call) : call_(call) {}
+
+ void Wakeup(void*) override {
+ call_->Wakeup(nullptr);
+ delete this;
+ }
+
+ void Drop(void*) override {
+ call_->Drop(nullptr);
+ delete this;
+ }
+
+ std::string ActivityDebugTag(void*) const override {
+ return call_->DebugTag();
+ }
+
+ private:
+ PromiseBasedCall* call_;
+ };
+ return Waker(new AsanWaker(this), nullptr);
+#endif
+#endif
+#ifndef GRPC_CALL_USES_ASAN_WAKER
+ return Waker(this, nullptr);
+#endif
+ }
+ Waker MakeNonOwningWaker() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) override;
+
+ // Wakeable methods
+ void Wakeup(void*) override {
+ channel()->event_engine()->Run([this] {
+ ApplicationCallbackExecCtx app_exec_ctx;
+ ExecCtx exec_ctx;
+ {
+ ScopedContext activity_context(this);
+ MutexLock lock(&mu_);
+ Update();
+ }
+ InternalUnref("wakeup");
+ });
+ }
+ void Drop(void*) override { InternalUnref("wakeup"); }
+
void RunInContext(absl::AnyInvocable<void()> fn) {
- Spawn(
- "run_in_context",
- [fn = std::move(fn)]() mutable {
+ if (Activity::current() == this) {
+ fn();
+ } else {
+ InternalRef("in_context");
+ channel()->event_engine()->Run([this, fn = std::move(fn)]() mutable {
+ ApplicationCallbackExecCtx app_exec_ctx;
+ ExecCtx exec_ctx;
+ {
+ ScopedContext activity_context(this);
+ MutexLock lock(&mu_);
fn();
- return Empty{};
- },
- [](Empty) {});
+ Update();
+ }
+ InternalUnref("in_context");
+ });
+ }
+ }
+
+ grpc_compression_algorithm test_only_compression_algorithm() override {
+ abort();
+ }
+ uint32_t test_only_message_flags() override { abort(); }
+ uint32_t test_only_encodings_accepted_by_peer() override { abort(); }
+ grpc_compression_algorithm compression_for_level(
+ grpc_compression_level) override {
+ abort();
}
// This should return nullptr for the promise stack (and alternative means
// for that functionality be invented)
grpc_call_stack* call_stack() override { return nullptr; }
- void UpdateDeadline(Timestamp deadline) ABSL_LOCKS_EXCLUDED(deadline_mu_);
- void ResetDeadline() ABSL_LOCKS_EXCLUDED(deadline_mu_);
+ void UpdateDeadline(Timestamp deadline);
+ void ResetDeadline();
// Implementation of EventEngine::Closure, called when deadline expires
void Run() override;
virtual ServerCallContext* server_call_context() { return nullptr; }
- using Call::arena;
-
protected:
- class ScopedBatchCoalescer : public BatchBuilder,
- public promise_detail::Context<BatchBuilder> {
- public:
- explicit ScopedBatchCoalescer(PromiseBasedCall* call)
- : BatchBuilder(&call->batch_payload_),
- promise_detail::Context<BatchBuilder>(
- promise_detail::KeepExistingIfPresent{}, this) {}
- };
-
class ScopedContext
: public ScopedActivity,
- public ScopedBatchCoalescer,
public promise_detail::Context<Arena>,
public promise_detail::Context<grpc_call_context_element>,
public promise_detail::Context<CallContext>,
@@ -2023,7 +2130,6 @@
public:
explicit ScopedContext(PromiseBasedCall* call)
: ScopedActivity(call),
- ScopedBatchCoalescer(call),
promise_detail::Context<Arena>(call->arena()),
promise_detail::Context<grpc_call_context_element>(call->context_),
promise_detail::Context<CallContext>(&call->call_context_),
@@ -2057,12 +2163,8 @@
};
~PromiseBasedCall() override {
+ if (non_owning_wakeable_) non_owning_wakeable_->DropActivity();
if (cq_) GRPC_CQ_INTERNAL_UNREF(cq_, "bind");
- for (int i = 0; i < GRPC_CONTEXT_COUNT; i++) {
- if (context_[i].destroy) {
- context_[i].destroy(context_[i].value);
- }
- }
}
// Enumerates why a Completion is still pending
@@ -2070,7 +2172,6 @@
// We're in the midst of starting a batch of operations
kStartingBatch = 0,
// The following correspond with the batch operations from above
- kSendInitialMetadata,
kReceiveInitialMetadata,
kReceiveStatusOnClient,
kReceiveCloseOnServer = kReceiveStatusOnClient,
@@ -2080,17 +2181,10 @@
kSendCloseFromClient = kSendStatusFromServer,
};
- bool RunParty() override {
- ScopedContext ctx(this);
- return Party::RunParty();
- }
-
const char* PendingOpString(PendingOp reason) const {
switch (reason) {
case PendingOp::kStartingBatch:
return "StartingBatch";
- case PendingOp::kSendInitialMetadata:
- return "SendInitialMetadata";
case PendingOp::kReceiveInitialMetadata:
return "ReceiveInitialMetadata";
case PendingOp::kReceiveStatusOnClient:
@@ -2105,47 +2199,56 @@
return "Unknown";
}
- static constexpr uint32_t PendingOpBit(PendingOp reason) {
+ static constexpr uint8_t PendingOpBit(PendingOp reason) {
return 1 << static_cast<int>(reason);
}
+ Mutex* mu() const ABSL_LOCK_RETURNED(mu_) { return &mu_; }
// Begin work on a completion, recording the tag/closure to notify.
// Use the op selected in \a ops to determine the index to allocate into.
// Starts the "StartingBatch" PendingOp immediately.
// Assumes at least one operation in \a ops.
- Completion StartCompletion(void* tag, bool is_closure, const grpc_op* ops);
+ Completion StartCompletion(void* tag, bool is_closure, const grpc_op* ops)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
// Add one pending op to the completion, and return it.
- Completion AddOpToCompletion(const Completion& completion, PendingOp reason);
+ Completion AddOpToCompletion(const Completion& completion, PendingOp reason)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
// Stringify a completion
std::string CompletionString(const Completion& completion) const {
return completion.has_value()
- ? completion_info_[completion.index()].pending.ToString(this)
+ ? absl::StrFormat(
+ "%d:tag=%p", static_cast<int>(completion.index()),
+ completion_info_[completion.index()].pending.tag)
: "no-completion";
}
// Finish one op on the completion. Must have been previously been added.
// The completion as a whole finishes when all pending ops finish.
- void FinishOpOnCompletion(Completion* completion, PendingOp reason);
+ void FinishOpOnCompletion(Completion* completion, PendingOp reason)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
// Mark the completion as failed. Does not finish it.
void FailCompletion(const Completion& completion,
SourceLocation source_location = {});
- // Mark the completion as infallible. Overrides FailCompletion to report
- // success always.
- void ForceCompletionSuccess(const Completion& completion);
+ // Run the promise polling loop until it stalls.
+ void Update() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
+ // Update the promise state once.
+ virtual void UpdateOnce() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) = 0;
// Accept the stats from the context (call once we have proof the transport is
// done with them).
// Right now this means that promise based calls do not record correct stats
// with census if they are cancelled.
// TODO(ctiller): this should be remedied before promise based calls are
// dexperimentalized.
- void AcceptTransportStatsFromContext() {
+ void AcceptTransportStatsFromContext() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
final_stats_ = *call_context_.call_stats();
}
- grpc_completion_queue* cq() { return cq_; }
+ grpc_completion_queue* cq() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) { return cq_; }
void CToMetadata(grpc_metadata* metadata, size_t count,
grpc_metadata_batch* batch);
+ std::string ActivityDebugTag(void*) const override { return DebugTag(); }
+
// At the end of the call run any finalization actions.
void RunFinalization(grpc_status_code status, const char* status_details) {
grpc_call_final_info final_info;
@@ -2174,161 +2277,158 @@
}
}
- // Spawn a job that will first do FirstPromise then receive a message
- template <typename FirstPromise>
+ std::string PollStateDebugString() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+ return absl::StrCat(PresentAndCompletionText("outstanding_send",
+ outstanding_send_.has_value(),
+ send_message_completion_)
+ .c_str(),
+ PresentAndCompletionText("outstanding_recv",
+ outstanding_recv_.has_value(),
+ recv_message_completion_)
+ .c_str());
+ }
+
void StartRecvMessage(const grpc_op& op, const Completion& completion,
- FirstPromise first,
- PipeReceiver<MessageHandle>* receiver);
+ PipeReceiver<MessageHandle>* receiver)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
+ void PollRecvMessage(grpc_compression_algorithm compression_algorithm)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
+ void CancelRecvMessage() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void StartSendMessage(const grpc_op& op, const Completion& completion,
- PipeSender<MessageHandle>* sender);
+ PipeSender<MessageHandle>* sender)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
+ bool PollSendMessage() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
+ void CancelSendMessage() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
- void set_completed() { finished_.Set(); }
-
- // Returns a promise that resolves to Empty whenever the call is completed.
- auto finished() { return finished_.Wait(); }
-
- // Returns a promise that resolves to Empty whenever there is no outstanding
- // send operation
- auto WaitForSendingStarted() {
- return [this]() -> Poll<Empty> {
- int n = sends_queued_.load(std::memory_order_relaxed);
- if (grpc_call_trace.enabled()) {
- gpr_log(GPR_DEBUG, "%s[call] WaitForSendingStarted n=%d",
- DebugTag().c_str(), n);
- }
- if (n != 0) return waiting_for_queued_sends_.pending();
- return Empty{};
- };
+ bool completed() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+ return completed_;
}
-
- // Mark that a send has been queued - blocks sending trailing metadata.
- void QueueSend() {
- if (grpc_call_trace.enabled()) {
- gpr_log(GPR_DEBUG, "%s[call] QueueSend", DebugTag().c_str());
- }
- sends_queued_.fetch_add(1, std::memory_order_relaxed);
- }
- // Mark that a send has been dequeued - allows sending trailing metadata once
- // zero sends are queued.
- void EnactSend() {
- if (grpc_call_trace.enabled()) {
- gpr_log(GPR_DEBUG, "%s[call] EnactSend", DebugTag().c_str());
- }
- if (1 == sends_queued_.fetch_sub(1, std::memory_order_relaxed)) {
- waiting_for_queued_sends_.Wake();
- }
+ void set_completed() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) { completed_ = true; }
+ bool is_sending() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+ return outstanding_send_.has_value();
}
private:
union CompletionInfo {
- static constexpr uint32_t kOpFailed = 0x8000'0000u;
- static constexpr uint32_t kOpForceSuccess = 0x4000'0000u;
- CompletionInfo() {}
- enum CompletionState {
- kPending,
- kSuccess,
- kFailure,
- };
struct Pending {
- // Bitmask of PendingOps at the bottom, and kOpFailed, kOpForceSuccess at
- // the top.
- std::atomic<uint32_t> state;
+ // Bitmask of PendingOps
+ uint8_t pending_op_bits;
bool is_closure;
- // True if this completion was for a recv_message op.
- // In that case if the completion as a whole fails we need to cleanup the
- // returned message.
- bool is_recv_message;
+ bool success;
void* tag;
-
- void Start(bool is_closure, void* tag) {
- this->is_closure = is_closure;
- this->is_recv_message = false;
- this->tag = tag;
- state.store(PendingOpBit(PendingOp::kStartingBatch),
- std::memory_order_release);
- }
-
- void AddPendingBit(PendingOp reason) {
- if (reason == PendingOp::kReceiveMessage) is_recv_message = true;
- auto prev =
- state.fetch_or(PendingOpBit(reason), std::memory_order_relaxed);
- GPR_ASSERT((prev & PendingOpBit(reason)) == 0);
- }
-
- CompletionState RemovePendingBit(PendingOp reason) {
- const uint32_t mask = ~PendingOpBit(reason);
- auto prev = state.fetch_and(mask, std::memory_order_acq_rel);
- GPR_ASSERT((prev & PendingOpBit(reason)) != 0);
- switch (prev & mask) {
- case kOpFailed:
- return kFailure;
- case kOpFailed | kOpForceSuccess:
- case kOpForceSuccess:
- case 0:
- return kSuccess;
- default:
- return kPending;
- }
- }
-
- void MarkFailed() {
- state.fetch_or(kOpFailed, std::memory_order_relaxed);
- }
-
- void MarkForceSuccess() {
- state.fetch_or(kOpForceSuccess, std::memory_order_relaxed);
- }
-
- std::string ToString(const PromiseBasedCall* call) const {
- auto state = this->state.load(std::memory_order_relaxed);
- std::vector<absl::string_view> pending_ops;
- for (size_t i = 0; i < 24; i++) {
- if (state & (1u << i)) {
- pending_ops.push_back(
- call->PendingOpString(static_cast<PendingOp>(i)));
- }
- }
- return absl::StrFormat("{%s}%s:tag=%p", absl::StrJoin(pending_ops, ","),
- (state & kOpForceSuccess) ? ":force-success"
- : (state & kOpFailed) ? ":failed"
- : ":success",
- tag);
- }
} pending;
grpc_cq_completion completion;
};
- void PartyOver() override {
- {
- ScopedContext ctx(this);
- CancelRemainingParticipants();
- arena()->DestroyManagedNewObjects();
+ class NonOwningWakable final : public Wakeable {
+ public:
+ explicit NonOwningWakable(PromiseBasedCall* call) : call_(call) {}
+
+ // Ref the Handle (not the activity).
+ void Ref() { refs_.fetch_add(1, std::memory_order_relaxed); }
+
+ // Activity is going away... drop its reference and sever the connection
+ // back.
+ void DropActivity() ABSL_LOCKS_EXCLUDED(mu_) {
+ auto unref = absl::MakeCleanup([this]() { Unref(); });
+ MutexLock lock(&mu_);
+ GPR_ASSERT(call_ != nullptr);
+ call_ = nullptr;
}
- DeleteThis();
+
+ // Activity needs to wake up (if it still exists!) - wake it up, and drop
+ // the ref that was kept for this handle.
+ void Wakeup(void*) override ABSL_LOCKS_EXCLUDED(mu_) {
+ // Drop the ref to the handle at end of scope (we have one ref = one
+ // wakeup semantics).
+ auto unref = absl::MakeCleanup([this]() { Unref(); });
+ ReleasableMutexLock lock(&mu_);
+ // Note that activity refcount can drop to zero, but we could win the lock
+ // against DropActivity, so we need to only increase activities refcount
+ // if it is non-zero.
+ PromiseBasedCall* call = call_;
+ if (call != nullptr && call->RefIfNonZero()) {
+ lock.Release();
+ // Activity still exists and we have a reference: wake it up, which will
+ // drop the ref.
+ call->Wakeup(nullptr);
+ }
+ }
+
+ std::string ActivityDebugTag(void*) const override {
+ MutexLock lock(&mu_);
+ return call_ == nullptr ? "<unknown>" : call_->DebugTag();
+ }
+
+ void Drop(void*) override { Unref(); }
+
+ private:
+ // Unref the Handle (not the activity).
+ void Unref() {
+ if (1 == refs_.fetch_sub(1, std::memory_order_acq_rel)) {
+ delete this;
+ }
+ }
+
+ mutable Mutex mu_;
+ // We have two initial refs: one for the wakeup that this is created for,
+ // and will be dropped by Wakeup, and the other for the activity which is
+ // dropped by DropActivity.
+ std::atomic<size_t> refs_{2};
+ PromiseBasedCall* call_ ABSL_GUARDED_BY(mu_);
+ };
+
+ static void OnDestroy(void* arg, grpc_error_handle) {
+ auto* call = static_cast<PromiseBasedCall*>(arg);
+ ScopedContext context(call);
+ call->DeleteThis();
}
+ // First 32 bits are strong refs, next 32 bits are weak refs.
+ static uint64_t MakeRefPair(uint32_t strong, uint32_t weak) {
+ return (static_cast<uint64_t>(strong) << 32) + static_cast<int64_t>(weak);
+ }
+ static uint32_t GetStrongRefs(uint64_t ref_pair) {
+ return static_cast<uint32_t>(ref_pair >> 32);
+ }
+ static uint32_t GetWeakRefs(uint64_t ref_pair) {
+ return static_cast<uint32_t>(ref_pair & 0xffffffffu);
+ }
+
+ bool RefIfNonZero() {
+ uint64_t prev_ref_pair = refs_.load(std::memory_order_acquire);
+ do {
+ const uint32_t strong_refs = GetStrongRefs(prev_ref_pair);
+ if (strong_refs == 0) return false;
+ } while (!refs_.compare_exchange_weak(
+ prev_ref_pair, prev_ref_pair + MakeRefPair(1, 0),
+ std::memory_order_acq_rel, std::memory_order_acquire));
+ return true;
+ }
+
+ mutable Mutex mu_;
+ std::atomic<uint64_t> refs_;
CallContext call_context_{this};
+ bool keep_polling_ ABSL_GUARDED_BY(mu()) = false;
// Contexts for various subsystems (security, tracing, ...).
grpc_call_context_element context_[GRPC_CONTEXT_COUNT] = {};
- grpc_completion_queue* cq_;
+ grpc_completion_queue* cq_ ABSL_GUARDED_BY(mu_);
+ NonOwningWakable* non_owning_wakeable_ ABSL_GUARDED_BY(mu_) = nullptr;
CompletionInfo completion_info_[6];
grpc_call_stats final_stats_{};
CallFinalization finalization_;
// Current deadline.
- Mutex deadline_mu_;
- Timestamp deadline_ ABSL_GUARDED_BY(deadline_mu_) = Timestamp::InfFuture();
- grpc_event_engine::experimental::EventEngine::TaskHandle ABSL_GUARDED_BY(
- deadline_mu_) deadline_task_;
- ExternallyObservableLatch<void> finished_;
- // Non-zero with an outstanding GRPC_OP_SEND_INITIAL_METADATA or
- // GRPC_OP_SEND_MESSAGE (one count each), and 0 once those payloads have been
- // pushed onto the outgoing pipe.
- std::atomic<uint8_t> sends_queued_{0};
- // Waiter for when sends_queued_ becomes 0.
- IntraActivityWaiter waiting_for_queued_sends_;
- grpc_byte_buffer** recv_message_ = nullptr;
- grpc_transport_stream_op_batch_payload batch_payload_{context_};
+ Timestamp deadline_ = Timestamp::InfFuture();
+ grpc_event_engine::experimental::EventEngine::TaskHandle deadline_task_;
+ absl::optional<PipeSender<MessageHandle>::PushType> outstanding_send_
+ ABSL_GUARDED_BY(mu_);
+ absl::optional<PipeReceiverNextType<MessageHandle>> outstanding_recv_
+ ABSL_GUARDED_BY(mu_);
+ grpc_byte_buffer** recv_message_ ABSL_GUARDED_BY(mu_) = nullptr;
+ Completion send_message_completion_ ABSL_GUARDED_BY(mu_);
+ Completion recv_message_completion_ ABSL_GUARDED_BY(mu_);
+ bool completed_ ABSL_GUARDED_BY(mu_) = false;
};
template <typename T>
@@ -2348,7 +2448,7 @@
const grpc_call_create_args& args)
: Call(arena, args.server_transport_data == nullptr, args.send_deadline,
args.channel->Ref()),
- Party(arena, initial_external_refs),
+ refs_(MakeRefPair(initial_external_refs, 0)),
cq_(args.cq) {
if (args.cq != nullptr) {
GPR_ASSERT(args.pollset_set_alternative == nullptr &&
@@ -2364,6 +2464,15 @@
}
}
+Waker PromiseBasedCall::MakeNonOwningWaker() {
+ if (non_owning_wakeable_ == nullptr) {
+ non_owning_wakeable_ = new NonOwningWakable(this);
+ } else {
+ non_owning_wakeable_->Ref();
+ }
+ return Waker(non_owning_wakeable_, nullptr);
+}
+
void PromiseBasedCall::CToMetadata(grpc_metadata* metadata, size_t count,
grpc_metadata_batch* b) {
for (size_t i = 0; i < count; i++) {
@@ -2398,14 +2507,15 @@
PromiseBasedCall::Completion PromiseBasedCall::StartCompletion(
void* tag, bool is_closure, const grpc_op* ops) {
Completion c(BatchSlotForOp(ops[0].op));
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO, "%s[call] StartCompletion %s tag=%p", DebugTag().c_str(),
+ CompletionString(c).c_str(), tag);
+ }
if (!is_closure) {
grpc_cq_begin_op(cq(), tag);
}
- completion_info_[c.index()].pending.Start(is_closure, tag);
- if (grpc_call_trace.enabled()) {
- gpr_log(GPR_INFO, "%s[call] StartCompletion %s", DebugTag().c_str(),
- CompletionString(c).c_str());
- }
+ completion_info_[c.index()].pending = {
+ PendingOpBit(PendingOp::kStartingBatch), is_closure, true, tag};
return c;
}
@@ -2416,7 +2526,10 @@
CompletionString(completion).c_str(), PendingOpString(reason));
}
GPR_ASSERT(completion.has_value());
- completion_info_[completion.index()].pending.AddPendingBit(reason);
+ auto& pending_op_bits =
+ completion_info_[completion.index()].pending.pending_op_bits;
+ GPR_ASSERT((pending_op_bits & PendingOpBit(reason)) == 0);
+ pending_op_bits |= PendingOpBit(reason);
return Completion(completion.index());
}
@@ -2427,50 +2540,64 @@
"%s[call] FailCompletion %s", DebugTag().c_str(),
CompletionString(completion).c_str());
}
- completion_info_[completion.index()].pending.MarkFailed();
-}
-
-void PromiseBasedCall::ForceCompletionSuccess(const Completion& completion) {
- completion_info_[completion.index()].pending.MarkForceSuccess();
+ completion_info_[completion.index()].pending.success = false;
}
void PromiseBasedCall::FinishOpOnCompletion(Completion* completion,
PendingOp reason) {
if (grpc_call_trace.enabled()) {
- gpr_log(GPR_INFO, "%s[call] FinishOpOnCompletion completion:%s finish:%s",
- DebugTag().c_str(), CompletionString(*completion).c_str(),
- PendingOpString(reason));
+ auto pending_op_bits =
+ completion_info_[completion->index()].pending.pending_op_bits;
+ bool success = completion_info_[completion->index()].pending.success;
+ std::vector<const char*> pending;
+ for (size_t i = 0; i < 8 * sizeof(pending_op_bits); i++) {
+ if (static_cast<PendingOp>(i) == reason) continue;
+ if (pending_op_bits & (1 << i)) {
+ pending.push_back(PendingOpString(static_cast<PendingOp>(i)));
+ }
+ }
+ gpr_log(
+ GPR_INFO, "%s[call] FinishOpOnCompletion tag:%p %s %s %s",
+ DebugTag().c_str(), completion_info_[completion->index()].pending.tag,
+ CompletionString(*completion).c_str(), PendingOpString(reason),
+ (pending.empty()
+ ? (success ? std::string("done") : std::string("failed"))
+ : absl::StrFormat("pending_ops={%s}", absl::StrJoin(pending, ",")))
+ .c_str());
}
const uint8_t i = completion->TakeIndex();
GPR_ASSERT(i < GPR_ARRAY_SIZE(completion_info_));
CompletionInfo::Pending& pending = completion_info_[i].pending;
- bool success;
- switch (pending.RemovePendingBit(reason)) {
- case CompletionInfo::kPending:
- return; // Early out
- case CompletionInfo::kSuccess:
- success = true;
- break;
- case CompletionInfo::kFailure:
- success = false;
- break;
- }
- if (pending.is_recv_message && !success && *recv_message_ != nullptr) {
- grpc_byte_buffer_destroy(*recv_message_);
- *recv_message_ = nullptr;
- }
- auto error = success ? absl::OkStatus() : absl::CancelledError();
- if (pending.is_closure) {
- ExecCtx::Run(DEBUG_LOCATION, static_cast<grpc_closure*>(pending.tag),
- error);
- } else {
- grpc_cq_end_op(
- cq(), pending.tag, error, [](void*, grpc_cq_completion*) {}, nullptr,
- &completion_info_[i].completion);
+ GPR_ASSERT(pending.pending_op_bits & PendingOpBit(reason));
+ pending.pending_op_bits &= ~PendingOpBit(reason);
+ auto error = pending.success ? absl::OkStatus() : absl::CancelledError();
+ if (pending.pending_op_bits == 0) {
+ if (pending.is_closure) {
+ ExecCtx::Run(DEBUG_LOCATION, static_cast<grpc_closure*>(pending.tag),
+ error);
+ } else {
+ grpc_cq_end_op(
+ cq(), pending.tag, error, [](void*, grpc_cq_completion*) {}, nullptr,
+ &completion_info_[i].completion);
+ }
}
}
+void PromiseBasedCall::Update() {
+ keep_polling_ = false;
+ do {
+ UpdateOnce();
+ } while (std::exchange(keep_polling_, false));
+}
+
+void PromiseBasedCall::ForceImmediateRepoll() { keep_polling_ = true; }
+
void PromiseBasedCall::SetCompletionQueue(grpc_completion_queue* cq) {
+ MutexLock lock(&mu_);
+ SetCompletionQueueLocked(cq);
+}
+
+void PromiseBasedCall::SetCompletionQueueLocked(grpc_completion_queue* cq) {
cq_ = cq;
GRPC_CQ_INTERNAL_REF(cq, "bind");
call_context_.pollent_ =
@@ -2478,12 +2605,6 @@
}
void PromiseBasedCall::UpdateDeadline(Timestamp deadline) {
- MutexLock lock(&deadline_mu_);
- if (grpc_call_trace.enabled()) {
- gpr_log(GPR_DEBUG, "%s[call] UpdateDeadline from=%s to=%s",
- DebugTag().c_str(), deadline_.ToString().c_str(),
- deadline.ToString().c_str());
- }
if (deadline >= deadline_) return;
auto* const event_engine = channel()->event_engine();
if (deadline_ != Timestamp::InfFuture()) {
@@ -2491,12 +2612,10 @@
} else {
InternalRef("deadline");
}
- deadline_ = deadline;
- deadline_task_ = event_engine->RunAfter(deadline - Timestamp::Now(), this);
+ event_engine->RunAfter(deadline - Timestamp::Now(), this);
}
void PromiseBasedCall::ResetDeadline() {
- MutexLock lock(&deadline_mu_);
if (deadline_ == Timestamp::InfFuture()) return;
auto* const event_engine = channel()->event_engine();
if (!event_engine->Cancel(deadline_task_)) return;
@@ -2514,84 +2633,116 @@
void PromiseBasedCall::StartSendMessage(const grpc_op& op,
const Completion& completion,
PipeSender<MessageHandle>* sender) {
- QueueSend();
- SliceBuffer send;
- grpc_slice_buffer_swap(
- &op.data.send_message.send_message->data.raw.slice_buffer,
- send.c_slice_buffer());
- auto msg = arena()->MakePooled<Message>(std::move(send), op.flags);
- Spawn(
- "call_send_message",
- [this, sender, msg = std::move(msg)]() mutable {
- EnactSend();
- return sender->Push(std::move(msg));
- },
- [this, completion = AddOpToCompletion(
- completion, PendingOp::kSendMessage)](bool result) mutable {
- if (grpc_call_trace.enabled()) {
- gpr_log(GPR_DEBUG, "%sSendMessage completes %s", DebugTag().c_str(),
- result ? "successfully" : "with failure");
- }
- if (!result) FailCompletion(completion);
- FinishOpOnCompletion(&completion, PendingOp::kSendMessage);
- });
+ GPR_ASSERT(!outstanding_send_.has_value());
+ if (!completed_) {
+ send_message_completion_ =
+ AddOpToCompletion(completion, PendingOp::kSendMessage);
+ SliceBuffer send;
+ grpc_slice_buffer_swap(
+ &op.data.send_message.send_message->data.raw.slice_buffer,
+ send.c_slice_buffer());
+ outstanding_send_.emplace(sender->Push(
+ GetContext<Arena>()->MakePooled<Message>(std::move(send), op.flags)));
+ } else {
+ FailCompletion(completion);
+ }
}
-template <typename FirstPromise>
+bool PromiseBasedCall::PollSendMessage() {
+ if (!outstanding_send_.has_value()) return true;
+ Poll<bool> r = (*outstanding_send_)();
+ if (const bool* result = r.value_if_ready()) {
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "%sPollSendMessage completes %s", DebugTag().c_str(),
+ *result ? "successfully" : "with failure");
+ }
+ if (!*result) {
+ FailCompletion(send_message_completion_);
+ return false;
+ }
+ FinishOpOnCompletion(&send_message_completion_, PendingOp::kSendMessage);
+ outstanding_send_.reset();
+ }
+ return true;
+}
+
+void PromiseBasedCall::CancelSendMessage() {
+ if (!outstanding_send_.has_value()) return;
+ FinishOpOnCompletion(&send_message_completion_, PendingOp::kSendMessage);
+ outstanding_send_.reset();
+}
+
void PromiseBasedCall::StartRecvMessage(const grpc_op& op,
const Completion& completion,
- FirstPromise first_promise,
PipeReceiver<MessageHandle>* receiver) {
- if (grpc_call_trace.enabled()) {
- gpr_log(GPR_INFO, "%s[call] Start RecvMessage: %s", DebugTag().c_str(),
- CompletionString(completion).c_str());
- }
+ GPR_ASSERT(!outstanding_recv_.has_value());
recv_message_ = op.data.recv_message.recv_message;
- Spawn(
- "call_recv_message",
- Seq(std::move(first_promise), [receiver]() { return receiver->Next(); }),
- [this,
- completion = AddOpToCompletion(completion, PendingOp::kReceiveMessage)](
- NextResult<MessageHandle> result) mutable {
- if (result.has_value()) {
- MessageHandle& message = *result;
- NoteLastMessageFlags(message->flags());
- if ((message->flags() & GRPC_WRITE_INTERNAL_COMPRESS) &&
- (incoming_compression_algorithm() != GRPC_COMPRESS_NONE)) {
- *recv_message_ = grpc_raw_compressed_byte_buffer_create(
- nullptr, 0, incoming_compression_algorithm());
- } else {
- *recv_message_ = grpc_raw_byte_buffer_create(nullptr, 0);
- }
- grpc_slice_buffer_move_into(message->payload()->c_slice_buffer(),
- &(*recv_message_)->data.raw.slice_buffer);
- if (grpc_call_trace.enabled()) {
- gpr_log(GPR_INFO,
- "%s[call] RecvMessage: outstanding_recv "
- "finishes: received %" PRIdPTR " byte message",
- DebugTag().c_str(),
- (*recv_message_)->data.raw.slice_buffer.length);
- }
- } else if (result.cancelled()) {
- if (grpc_call_trace.enabled()) {
- gpr_log(GPR_INFO,
- "%s[call] RecvMessage: outstanding_recv "
- "finishes: received end-of-stream with error",
- DebugTag().c_str());
- }
- FailCompletion(completion);
- *recv_message_ = nullptr;
- } else {
- if (grpc_call_trace.enabled()) {
- gpr_log(GPR_INFO,
- "%s[call] RecvMessage: outstanding_recv "
- "finishes: received end-of-stream",
- DebugTag().c_str());
- }
- *recv_message_ = nullptr;
- }
- FinishOpOnCompletion(&completion, PendingOp::kReceiveMessage);
- });
+ recv_message_completion_ =
+ AddOpToCompletion(completion, PendingOp::kReceiveMessage);
+ outstanding_recv_.emplace(receiver->Next());
+}
+
+void PromiseBasedCall::PollRecvMessage(
+ grpc_compression_algorithm incoming_compression_algorithm) {
+ if (!outstanding_recv_.has_value()) return;
+ Poll<NextResult<MessageHandle>> r = (*outstanding_recv_)();
+ if (auto* result = r.value_if_ready()) {
+ outstanding_recv_.reset();
+ if (result->has_value()) {
+ MessageHandle& message = **result;
+ if ((message->flags() & GRPC_WRITE_INTERNAL_COMPRESS) &&
+ (incoming_compression_algorithm != GRPC_COMPRESS_NONE)) {
+ *recv_message_ = grpc_raw_compressed_byte_buffer_create(
+ nullptr, 0, incoming_compression_algorithm);
+ } else {
+ *recv_message_ = grpc_raw_byte_buffer_create(nullptr, 0);
+ }
+ grpc_slice_buffer_move_into(message->payload()->c_slice_buffer(),
+ &(*recv_message_)->data.raw.slice_buffer);
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "%s[call] PollRecvMessage: outstanding_recv finishes: received "
+ "%" PRIdPTR " byte message",
+ DebugTag().c_str(),
+ (*recv_message_)->data.raw.slice_buffer.length);
+ }
+ } else if (result->cancelled()) {
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "%s[call] PollRecvMessage: outstanding_recv finishes: received "
+ "end-of-stream with error",
+ DebugTag().c_str());
+ }
+ FailCompletion(recv_message_completion_);
+ *recv_message_ = nullptr;
+ } else {
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "%s[call] PollRecvMessage: outstanding_recv finishes: received "
+ "end-of-stream",
+ DebugTag().c_str());
+ }
+ *recv_message_ = nullptr;
+ }
+ FinishOpOnCompletion(&recv_message_completion_, PendingOp::kReceiveMessage);
+ } else if (completed_) {
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "%s[call] UpdateOnce: outstanding_recv finishes: promise has "
+ "completed without queuing a message, forcing end-of-stream",
+ DebugTag().c_str());
+ }
+ outstanding_recv_.reset();
+ *recv_message_ = nullptr;
+ FinishOpOnCompletion(&recv_message_completion_, PendingOp::kReceiveMessage);
+ }
+}
+
+void PromiseBasedCall::CancelRecvMessage() {
+ if (!outstanding_recv_.has_value()) return;
+ *recv_message_ = nullptr;
+ outstanding_recv_.reset();
+ FinishOpOnCompletion(&recv_message_completion_, PendingOp::kReceiveMessage);
}
///////////////////////////////////////////////////////////////////////////////
@@ -2660,40 +2811,24 @@
~ClientPromiseBasedCall() override {
ScopedContext context(this);
send_initial_metadata_.reset();
- // Need to destroy the pipes under the ScopedContext above, so we
- // move them out here and then allow the destructors to run at
- // end of scope, but before context.
+ recv_status_on_client_ = absl::monostate();
+ promise_ = ArenaPromise<ServerMetadataHandle>();
+ // Need to destroy the pipes under the ScopedContext above, so we move them
+ // out here and then allow the destructors to run at end of scope, but
+ // before context.
auto c2s = std::move(client_to_server_messages_);
auto s2c = std::move(server_to_client_messages_);
auto sim = std::move(server_initial_metadata_);
}
- void CancelWithError(absl::Status error) override {
- if (!started_.exchange(true, std::memory_order_relaxed)) {
- // Initial metadata not sent yet, so we can just fail the call.
- Spawn(
- "cancel_before_initial_metadata",
- [error = std::move(error), this]() {
- server_to_client_messages_.sender.Close();
- Finish(ServerMetadataFromStatus(error));
- return Empty{};
- },
- [](Empty) {});
- } else {
- Spawn(
- "cancel_with_error",
- [error = std::move(error), this]() {
- if (!cancel_error_.is_set()) {
- cancel_error_.Set(ServerMetadataFromStatus(error));
- }
- return Empty{};
- },
- [](Empty) {});
- }
- }
absl::string_view GetServerAuthority() const override { abort(); }
- bool is_trailers_only() const override { return is_trailers_only_; }
- bool failed_before_recv_message() const override { return false; }
+ void CancelWithErrorLocked(grpc_error_handle error) override
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu());
+ bool is_trailers_only() const override {
+ MutexLock lock(mu());
+ return is_trailers_only_;
+ }
+ bool failed_before_recv_message() const override { abort(); }
grpc_call_error StartBatch(const grpc_op* ops, size_t nops, void* notify_tag,
bool is_notify_tag_closure) override;
@@ -2703,76 +2838,65 @@
}
private:
+ // Poll the underlying promise (and sundry objects) once.
+ void UpdateOnce() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu()) override;
// Finish the call with the given status/trailing metadata.
- void Finish(ServerMetadataHandle trailing_metadata);
+ void Finish(ServerMetadataHandle trailing_metadata)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu());
// Validate that a set of ops is valid for a client call.
- grpc_call_error ValidateBatch(const grpc_op* ops, size_t nops) const;
+ grpc_call_error ValidateBatch(const grpc_op* ops, size_t nops) const
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu());
// Commit a valid batch of operations to be executed.
void CommitBatch(const grpc_op* ops, size_t nops,
- const Completion& completion);
+ const Completion& completion)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu());
// Start the underlying promise.
- void StartPromise(ClientMetadataHandle client_initial_metadata,
- const Completion& completion);
- // Start receiving initial metadata
- void StartRecvInitialMetadata(grpc_metadata_array* array,
- const Completion& completion);
- void StartRecvStatusOnClient(
- const Completion& completion,
- grpc_op::grpc_op_data::grpc_op_recv_status_on_client op_args);
+ void StartPromise(ClientMetadataHandle client_initial_metadata)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu());
// Publish status out to the application.
void PublishStatus(
grpc_op::grpc_op_data::grpc_op_recv_status_on_client op_args,
- ServerMetadataHandle trailing_metadata);
+ ServerMetadataHandle trailing_metadata)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu());
// Publish server initial metadata out to the application.
- void PublishInitialMetadata(ServerMetadata* metadata);
+ void PublishInitialMetadata(ServerMetadata* metadata)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu());
+
+ ArenaPromise<ServerMetadataHandle> promise_ ABSL_GUARDED_BY(mu());
+ Pipe<ServerMetadataHandle> server_initial_metadata_ ABSL_GUARDED_BY(mu()){
+ arena()};
+ Pipe<MessageHandle> client_to_server_messages_ ABSL_GUARDED_BY(mu()){arena()};
+ Pipe<MessageHandle> server_to_client_messages_ ABSL_GUARDED_BY(mu()){arena()};
ClientMetadataHandle send_initial_metadata_;
- Pipe<ServerMetadataHandle> server_initial_metadata_{arena()};
- Latch<ServerMetadataHandle> server_trailing_metadata_;
- Latch<ServerMetadataHandle> cancel_error_;
- Pipe<MessageHandle> client_to_server_messages_{arena()};
- Pipe<MessageHandle> server_to_client_messages_{arena()};
- bool is_trailers_only_;
- // True once the promise for the call is started.
- // This corresponds to sending initial metadata, or cancelling before doing
- // so.
- // In the latter case real world code sometimes does not sent the initial
- // metadata, and so gating based upon that does not work out.
- std::atomic<bool> started_{false};
+ grpc_metadata_array* recv_initial_metadata_ ABSL_GUARDED_BY(mu()) = nullptr;
+ absl::variant<absl::monostate,
+ grpc_op::grpc_op_data::grpc_op_recv_status_on_client,
+ ServerMetadataHandle>
+ recv_status_on_client_ ABSL_GUARDED_BY(mu());
+ absl::optional<PipeReceiverNextType<ServerMetadataHandle>>
+ server_initial_metadata_ready_;
+ absl::optional<grpc_compression_algorithm> incoming_compression_algorithm_;
+ Completion recv_initial_metadata_completion_ ABSL_GUARDED_BY(mu());
+ Completion recv_status_on_client_completion_ ABSL_GUARDED_BY(mu());
+ Completion close_send_completion_ ABSL_GUARDED_BY(mu());
+ bool is_trailers_only_ ABSL_GUARDED_BY(mu());
};
void ClientPromiseBasedCall::StartPromise(
- ClientMetadataHandle client_initial_metadata,
- const Completion& completion) {
- auto token = ClientInitialMetadataOutstandingToken::New(arena());
- Spawn("call_send_initial_metadata", token.Wait(),
- [this, completion = AddOpToCompletion(completion,
- PendingOp::kSendInitialMetadata)](
- bool result) mutable {
- if (!result) FailCompletion(completion);
- FinishOpOnCompletion(&completion, PendingOp::kSendInitialMetadata);
- });
- Spawn(
- "client_promise",
- [this, client_initial_metadata = std::move(client_initial_metadata),
- token = std::move(token)]() mutable {
- return Race(
- cancel_error_.Wait(),
- Map(channel()->channel_stack()->MakeClientCallPromise(
- CallArgs{std::move(client_initial_metadata),
- std::move(token), &server_initial_metadata_.sender,
- &client_to_server_messages_.receiver,
- &server_to_client_messages_.sender}),
- [this](ServerMetadataHandle trailing_metadata) {
- // If we're cancelled the transport doesn't get to return
- // stats.
- AcceptTransportStatsFromContext();
- return trailing_metadata;
- }));
- },
- [this](ServerMetadataHandle trailing_metadata) {
- Finish(std::move(trailing_metadata));
- });
+ ClientMetadataHandle client_initial_metadata) {
+ GPR_ASSERT(!promise_.has_value());
+ promise_ = channel()->channel_stack()->MakeClientCallPromise(CallArgs{
+ std::move(client_initial_metadata),
+ &server_initial_metadata_.sender,
+ &client_to_server_messages_.receiver,
+ &server_to_client_messages_.sender,
+ });
+}
+
+void ClientPromiseBasedCall::CancelWithErrorLocked(grpc_error_handle error) {
+ ScopedContext context(this);
+ Finish(ServerMetadataFromStatus(grpc_error_to_absl_status(error)));
}
grpc_call_error ClientPromiseBasedCall::ValidateBatch(const grpc_op* ops,
@@ -2813,55 +2937,49 @@
void ClientPromiseBasedCall::CommitBatch(const grpc_op* ops, size_t nops,
const Completion& completion) {
- ScopedBatchCoalescer coalescer(this);
for (size_t op_idx = 0; op_idx < nops; op_idx++) {
const grpc_op& op = ops[op_idx];
switch (op.op) {
case GRPC_OP_SEND_INITIAL_METADATA: {
- if (started_.exchange(true, std::memory_order_relaxed)) break;
- CToMetadata(op.data.send_initial_metadata.metadata,
- op.data.send_initial_metadata.count,
- send_initial_metadata_.get());
- PrepareOutgoingInitialMetadata(op, *send_initial_metadata_);
- if (send_deadline() != Timestamp::InfFuture()) {
- send_initial_metadata_->Set(GrpcTimeoutMetadata(), send_deadline());
+ // compression not implemented
+ GPR_ASSERT(
+ !op.data.send_initial_metadata.maybe_compression_level.is_set);
+ if (!completed()) {
+ CToMetadata(op.data.send_initial_metadata.metadata,
+ op.data.send_initial_metadata.count,
+ send_initial_metadata_.get());
+ StartPromise(std::move(send_initial_metadata_));
}
- send_initial_metadata_->Set(
- WaitForReady(),
- WaitForReady::ValueType{
- (op.flags & GRPC_INITIAL_METADATA_WAIT_FOR_READY) != 0,
- (op.flags &
- GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET) != 0});
- StartPromise(std::move(send_initial_metadata_), completion);
} break;
case GRPC_OP_RECV_INITIAL_METADATA: {
- StartRecvInitialMetadata(
- op.data.recv_initial_metadata.recv_initial_metadata, completion);
+ recv_initial_metadata_ =
+ op.data.recv_initial_metadata.recv_initial_metadata;
+ server_initial_metadata_ready_.emplace(
+ server_initial_metadata_.receiver.Next());
+ recv_initial_metadata_completion_ =
+ AddOpToCompletion(completion, PendingOp::kReceiveInitialMetadata);
} break;
case GRPC_OP_RECV_STATUS_ON_CLIENT: {
- StartRecvStatusOnClient(completion, op.data.recv_status_on_client);
+ recv_status_on_client_completion_ =
+ AddOpToCompletion(completion, PendingOp::kReceiveStatusOnClient);
+ if (auto* finished_metadata =
+ absl::get_if<ServerMetadataHandle>(&recv_status_on_client_)) {
+ PublishStatus(op.data.recv_status_on_client,
+ std::move(*finished_metadata));
+ } else {
+ recv_status_on_client_ = op.data.recv_status_on_client;
+ }
} break;
case GRPC_OP_SEND_MESSAGE:
StartSendMessage(op, completion, &client_to_server_messages_.sender);
break;
case GRPC_OP_RECV_MESSAGE:
- StartRecvMessage(op, completion,
- server_initial_metadata_.receiver.AwaitClosed(),
- &server_to_client_messages_.receiver);
+ StartRecvMessage(op, completion, &server_to_client_messages_.receiver);
break;
case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
- Spawn(
- "send_close_from_client",
- [this]() {
- client_to_server_messages_.sender.Close();
- return Empty{};
- },
- [this,
- completion = AddOpToCompletion(
- completion, PendingOp::kSendCloseFromClient)](Empty) mutable {
- FinishOpOnCompletion(&completion,
- PendingOp::kSendCloseFromClient);
- });
+ close_send_completion_ =
+ AddOpToCompletion(completion, PendingOp::kSendCloseFromClient);
+ GPR_ASSERT(close_send_completion_.has_value());
break;
case GRPC_OP_SEND_STATUS_FROM_SERVER:
case GRPC_OP_RECV_CLOSE_ON_SERVER:
@@ -2874,6 +2992,8 @@
size_t nops,
void* notify_tag,
bool is_notify_tag_closure) {
+ MutexLock lock(mu());
+ ScopedContext activity_context(this);
if (nops == 0) {
EndOpImmediately(cq(), notify_tag, is_notify_tag_closure);
return GRPC_CALL_OK;
@@ -2885,33 +3005,71 @@
Completion completion =
StartCompletion(notify_tag, is_notify_tag_closure, ops);
CommitBatch(ops, nops, completion);
+ Update();
FinishOpOnCompletion(&completion, PendingOp::kStartingBatch);
return GRPC_CALL_OK;
}
-void ClientPromiseBasedCall::StartRecvInitialMetadata(
- grpc_metadata_array* array, const Completion& completion) {
- Spawn("recv_initial_metadata",
- Race(server_initial_metadata_.receiver.Next(),
- Map(finished(),
- [](Empty) { return NextResult<ServerMetadataHandle>(true); })),
- [this, array,
- completion =
- AddOpToCompletion(completion, PendingOp::kReceiveInitialMetadata)](
- NextResult<ServerMetadataHandle> next_metadata) mutable {
- server_initial_metadata_.sender.Close();
- ServerMetadataHandle metadata;
- if (next_metadata.has_value()) {
- is_trailers_only_ = false;
- metadata = std::move(next_metadata.value());
- } else {
- is_trailers_only_ = true;
- metadata = arena()->MakePooled<ServerMetadata>(arena());
- }
- ProcessIncomingInitialMetadata(*metadata);
- PublishMetadataArray(metadata.get(), array);
- FinishOpOnCompletion(&completion, PendingOp::kReceiveInitialMetadata);
- });
+void ClientPromiseBasedCall::PublishInitialMetadata(ServerMetadata* metadata) {
+ incoming_compression_algorithm_ =
+ metadata->Take(GrpcEncodingMetadata()).value_or(GRPC_COMPRESS_NONE);
+ Slice* peer_string = metadata->get_pointer(PeerString());
+ if (peer_string != nullptr) SetPeerString(peer_string->Ref());
+ server_initial_metadata_ready_.reset();
+ GPR_ASSERT(recv_initial_metadata_ != nullptr);
+ PublishMetadataArray(metadata,
+ std::exchange(recv_initial_metadata_, nullptr));
+ FinishOpOnCompletion(&recv_initial_metadata_completion_,
+ PendingOp::kReceiveInitialMetadata);
+}
+
+void ClientPromiseBasedCall::UpdateOnce() {
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO, "%s[call] UpdateOnce: %s%shas_promise=%s",
+ DebugTag().c_str(),
+ PresentAndCompletionText("server_initial_metadata_ready",
+ server_initial_metadata_ready_.has_value(),
+ recv_initial_metadata_completion_)
+ .c_str(),
+ PollStateDebugString().c_str(),
+ promise_.has_value() ? "true" : "false");
+ }
+ if (server_initial_metadata_ready_.has_value()) {
+ Poll<NextResult<ServerMetadataHandle>> r =
+ (*server_initial_metadata_ready_)();
+ if (auto* server_initial_metadata = r.value_if_ready()) {
+ PublishInitialMetadata(server_initial_metadata->value().get());
+ } else if (completed()) {
+ ServerMetadata no_metadata{GetContext<Arena>()};
+ PublishInitialMetadata(&no_metadata);
+ }
+ }
+ if (!PollSendMessage()) {
+ Finish(ServerMetadataFromStatus(absl::Status(
+ absl::StatusCode::kInternal, "Failed to send message to server")));
+ }
+ if (!is_sending() && close_send_completion_.has_value()) {
+ client_to_server_messages_.sender.Close();
+ FinishOpOnCompletion(&close_send_completion_,
+ PendingOp::kSendCloseFromClient);
+ }
+ if (promise_.has_value()) {
+ Poll<ServerMetadataHandle> r = promise_();
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO, "%s[call] UpdateOnce: promise returns %s",
+ DebugTag().c_str(),
+ PollToString(r, [](const ServerMetadataHandle& h) {
+ return h->DebugString();
+ }).c_str());
+ }
+ if (auto* result = r.value_if_ready()) {
+ AcceptTransportStatsFromContext();
+ Finish(std::move(*result));
+ }
+ }
+ if (incoming_compression_algorithm_.has_value()) {
+ PollRecvMessage(*incoming_compression_algorithm_);
+ }
}
void ClientPromiseBasedCall::Finish(ServerMetadataHandle trailing_metadata) {
@@ -2919,9 +3077,31 @@
gpr_log(GPR_INFO, "%s[call] Finish: %s", DebugTag().c_str(),
trailing_metadata->DebugString().c_str());
}
+ promise_ = ArenaPromise<ServerMetadataHandle>();
ResetDeadline();
set_completed();
- client_to_server_messages_.sender.Close();
+ if (recv_initial_metadata_ != nullptr) {
+ ForceImmediateRepoll();
+ }
+ const bool pending_initial_metadata =
+ server_initial_metadata_ready_.has_value();
+ if (!pending_initial_metadata) {
+ server_initial_metadata_ready_.emplace(
+ server_initial_metadata_.receiver.Next());
+ }
+ Poll<NextResult<ServerMetadataHandle>> r =
+ (*server_initial_metadata_ready_)();
+ server_initial_metadata_ready_.reset();
+ if (auto* result = r.value_if_ready()) {
+ if (pending_initial_metadata) PublishInitialMetadata(result->value().get());
+ is_trailers_only_ = false;
+ } else {
+ if (pending_initial_metadata) {
+ ServerMetadata no_metadata{GetContext<Arena>()};
+ PublishInitialMetadata(&no_metadata);
+ }
+ is_trailers_only_ = true;
+ }
if (auto* channelz_channel = channel()->channelz_node()) {
if (trailing_metadata->get(GrpcStatusMetadata())
.value_or(GRPC_STATUS_UNKNOWN) == GRPC_STATUS_OK) {
@@ -2930,7 +3110,13 @@
channelz_channel->RecordCallFailed();
}
}
- server_trailing_metadata_.Set(std::move(trailing_metadata));
+ if (auto* status_request =
+ absl::get_if<grpc_op::grpc_op_data::grpc_op_recv_status_on_client>(
+ &recv_status_on_client_)) {
+ PublishStatus(*status_request, std::move(trailing_metadata));
+ } else {
+ recv_status_on_client_ = std::move(trailing_metadata);
+ }
}
namespace {
@@ -2956,41 +3142,35 @@
}
} // namespace
-void ClientPromiseBasedCall::StartRecvStatusOnClient(
- const Completion& completion,
- grpc_op::grpc_op_data::grpc_op_recv_status_on_client op_args) {
- ForceCompletionSuccess(completion);
- Spawn("recv_status_on_client", server_trailing_metadata_.Wait(),
- [this, op_args,
- completion =
- AddOpToCompletion(completion, PendingOp::kReceiveStatusOnClient)](
- ServerMetadataHandle trailing_metadata) mutable {
- const grpc_status_code status =
- trailing_metadata->get(GrpcStatusMetadata())
- .value_or(GRPC_STATUS_UNKNOWN);
- *op_args.status = status;
- absl::string_view message_string;
- if (Slice* message =
- trailing_metadata->get_pointer(GrpcMessageMetadata())) {
- message_string = message->as_string_view();
- *op_args.status_details = message->Ref().TakeCSlice();
- } else {
- *op_args.status_details = grpc_empty_slice();
- }
- if (message_string.empty()) {
- RunFinalization(status, nullptr);
- } else {
- std::string error_string(message_string);
- RunFinalization(status, error_string.c_str());
- }
- if (op_args.error_string != nullptr && status != GRPC_STATUS_OK) {
- *op_args.error_string =
- gpr_strdup(MakeErrorString(trailing_metadata.get()).c_str());
- }
- PublishMetadataArray(trailing_metadata.get(),
- op_args.trailing_metadata);
- FinishOpOnCompletion(&completion, PendingOp::kReceiveStatusOnClient);
- });
+void ClientPromiseBasedCall::PublishStatus(
+ grpc_op::grpc_op_data::grpc_op_recv_status_on_client op_args,
+ ServerMetadataHandle trailing_metadata) {
+ const grpc_status_code status = trailing_metadata->get(GrpcStatusMetadata())
+ .value_or(GRPC_STATUS_UNKNOWN);
+ *op_args.status = status;
+ absl::string_view message_string;
+ if (Slice* message = trailing_metadata->get_pointer(GrpcMessageMetadata())) {
+ message_string = message->as_string_view();
+ *op_args.status_details = message->Ref().TakeCSlice();
+ } else {
+ *op_args.status_details = grpc_empty_slice();
+ }
+ if (message_string.empty()) {
+ RunFinalization(status, nullptr);
+ } else {
+ std::string error_string(message_string);
+ RunFinalization(status, error_string.c_str());
+ }
+ if (op_args.error_string != nullptr && status != GRPC_STATUS_OK) {
+ *op_args.error_string =
+ gpr_strdup(MakeErrorString(trailing_metadata.get()).c_str());
+ }
+ PublishMetadataArray(trailing_metadata.get(), op_args.trailing_metadata);
+ // Clear state saying we have a RECV_STATUS_ON_CLIENT outstanding
+ // (so we don't call through twice)
+ recv_status_on_client_ = absl::monostate();
+ FinishOpOnCompletion(&recv_status_on_client_completion_,
+ PendingOp::kReceiveStatusOnClient);
}
#endif
@@ -3003,18 +3183,19 @@
public:
ServerPromiseBasedCall(Arena* arena, grpc_call_create_args* args);
- void CancelWithError(grpc_error_handle) override;
+ void CancelWithErrorLocked(grpc_error_handle) override
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu());
grpc_call_error StartBatch(const grpc_op* ops, size_t nops, void* notify_tag,
bool is_notify_tag_closure) override;
- bool failed_before_recv_message() const override { return false; }
+ bool failed_before_recv_message() const override { abort(); }
bool is_trailers_only() const override { abort(); }
absl::string_view GetServerAuthority() const override { return ""; }
// Polling order for the server promise stack:
//
// │ ┌───────────────────────────────────────┐
- // │ │ ServerPromiseBasedCall ├──► Lifetime management
- // │ ├───────────────────────────────────────┤
+ // │ │ ServerPromiseBasedCall::UpdateOnce ├──► Lifetime management,
+ // │ ├───────────────────────────────────────┤ signal call end to app
// │ │ ConnectedChannel ├─┐
// │ ├───────────────────────────────────────┤ └► Interactions with the
// │ │ ... closest to transport filter │ transport - send/recv msgs
@@ -3025,12 +3206,16 @@
// │ ├───────────────────────────────────────┤ │ setup, publishing call to
// │ │ Server::ChannelData::MakeCallPromise ├─┘ application
// │ ├───────────────────────────────────────┤
- // │ │ MakeTopOfServerCallPromise ├──► Send trailing metadata
- // ▼ └───────────────────────────────────────┘
- // Polling &
+ // │ │ ServerPromiseBasedCall::PollTopOfCall ├──► Application interactions,
+ // ▼ └───────────────────────────────────────┘ forwarding messages,
+ // Polling & sending trailing metadata
// instantiation
// order
+ void UpdateOnce() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu()) override;
+ Poll<ServerMetadataHandle> PollTopOfCall()
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu());
+
std::string DebugTag() const override {
return absl::StrFormat("SERVER_CALL[%p]: ", this);
}
@@ -3040,64 +3225,44 @@
private:
class RecvCloseOpCancelState {
public:
- // Request that receiver be filled in per
- // grpc_op_recv_close_on_server. Returns true if the request can
- // be fulfilled immediately. Returns false if the request will be
- // fulfilled later.
+ // Request that receiver be filled in per grpc_op_recv_close_on_server.
+ // Returns true if the request can be fulfilled immediately.
+ // Returns false if the request will be fulfilled later.
bool ReceiveCloseOnServerOpStarted(int* receiver) {
- uintptr_t state = state_.load(std::memory_order_acquire);
- uintptr_t new_state;
- do {
- switch (state) {
- case kUnset:
- new_state = reinterpret_cast<uintptr_t>(receiver);
- break;
- case kFinishedWithFailure:
- *receiver = 1;
- return true;
- case kFinishedWithSuccess:
- *receiver = 0;
- return true;
- default:
- Crash("Two threads offered ReceiveCloseOnServerOpStarted");
- }
- } while (!state_.compare_exchange_weak(state, new_state,
- std::memory_order_acq_rel,
- std::memory_order_acquire));
- return false;
+ switch (state_) {
+ case kUnset:
+ state_ = reinterpret_cast<uintptr_t>(receiver);
+ return false;
+ case kFinishedWithFailure:
+ *receiver = 1;
+ return true;
+ case kFinishedWithSuccess:
+ *receiver = 0;
+ return true;
+ default:
+ abort(); // unreachable
+ }
}
// Mark the call as having completed.
- // Returns true if this finishes a previous
- // RequestReceiveCloseOnServer.
- bool CompleteCallWithCancelledSetTo(bool cancelled) {
- uintptr_t state = state_.load(std::memory_order_acquire);
- uintptr_t new_state;
- bool r;
- do {
- switch (state) {
- case kUnset:
- new_state = cancelled ? kFinishedWithFailure : kFinishedWithSuccess;
- r = false;
- break;
- case kFinishedWithFailure:
- return false;
- case kFinishedWithSuccess:
- Crash("unreachable");
- default:
- new_state = cancelled ? kFinishedWithFailure : kFinishedWithSuccess;
- r = true;
- }
- } while (!state_.compare_exchange_weak(state, new_state,
- std::memory_order_acq_rel,
- std::memory_order_acquire));
- if (r) *reinterpret_cast<int*>(state) = cancelled ? 1 : 0;
- return r;
+ // Returns true if this finishes a previous RequestReceiveCloseOnServer.
+ bool CompleteCall(bool success) {
+ switch (state_) {
+ case kUnset:
+ state_ = success ? kFinishedWithSuccess : kFinishedWithFailure;
+ return false;
+ case kFinishedWithFailure:
+ case kFinishedWithSuccess:
+ abort(); // unreachable
+ default:
+ *reinterpret_cast<int*>(state_) = success ? 0 : 1;
+ state_ = success ? kFinishedWithSuccess : kFinishedWithFailure;
+ return true;
+ }
}
std::string ToString() const {
- auto state = state_.load(std::memory_order_relaxed);
- switch (state) {
+ switch (state_) {
case kUnset:
return "Unset";
case kFinishedWithFailure:
@@ -3106,7 +3271,7 @@
return "FinishedWithSuccess";
default:
return absl::StrFormat("WaitingForReceiver(%p)",
- reinterpret_cast<void*>(state));
+ reinterpret_cast<void*>(state_));
}
}
@@ -3114,28 +3279,37 @@
static constexpr uintptr_t kUnset = 0;
static constexpr uintptr_t kFinishedWithFailure = 1;
static constexpr uintptr_t kFinishedWithSuccess = 2;
- // Holds one of kUnset, kFinishedWithFailure, or
- // kFinishedWithSuccess OR an int* that wants to receive the
- // final status.
- std::atomic<uintptr_t> state_{kUnset};
+ // Holds one of kUnset, kFinishedWithFailure, or kFinishedWithSuccess
+ // OR an int* that wants to receive the final status.
+ uintptr_t state_ = kUnset;
};
grpc_call_error ValidateBatch(const grpc_op* ops, size_t nops) const;
void CommitBatch(const grpc_op* ops, size_t nops,
- const Completion& completion);
- void Finish(ServerMetadataHandle result);
+ const Completion& completion)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu());
friend class ServerCallContext;
ServerCallContext call_context_;
Server* const server_;
- PipeSender<ServerMetadataHandle>* server_initial_metadata_ = nullptr;
- PipeSender<MessageHandle>* server_to_client_messages_ = nullptr;
- PipeReceiver<MessageHandle>* client_to_server_messages_ = nullptr;
- Latch<ServerMetadataHandle> send_trailing_metadata_;
- RecvCloseOpCancelState recv_close_op_cancel_state_;
- ClientMetadataHandle client_initial_metadata_;
- Completion recv_close_completion_;
- std::atomic<bool> cancelled_{false};
+ ArenaPromise<ServerMetadataHandle> promise_ ABSL_GUARDED_BY(mu());
+ PipeSender<MessageHandle>* server_to_client_messages_ ABSL_GUARDED_BY(mu()) =
+ nullptr;
+ PipeReceiver<MessageHandle>* client_to_server_messages_
+ ABSL_GUARDED_BY(mu()) = nullptr;
+ using SendInitialMetadataState =
+ absl::variant<absl::monostate, PipeSender<ServerMetadataHandle>*,
+ typename PipeSender<ServerMetadataHandle>::PushType>;
+ SendInitialMetadataState send_initial_metadata_state_ ABSL_GUARDED_BY(mu()) =
+ absl::monostate{};
+ ServerMetadataHandle send_trailing_metadata_ ABSL_GUARDED_BY(mu());
+ grpc_compression_algorithm incoming_compression_algorithm_
+ ABSL_GUARDED_BY(mu());
+ RecvCloseOpCancelState recv_close_op_cancel_state_ ABSL_GUARDED_BY(mu());
+ Completion recv_close_completion_ ABSL_GUARDED_BY(mu());
+ bool cancel_send_and_receive_ ABSL_GUARDED_BY(mu()) = false;
+ Completion send_status_from_server_completion_ ABSL_GUARDED_BY(mu());
+ ClientMetadataHandle client_initial_metadata_ ABSL_GUARDED_BY(mu());
};
ServerPromiseBasedCall::ServerPromiseBasedCall(Arena* arena,
@@ -3168,40 +3342,106 @@
ContextSet(GRPC_CONTEXT_CALL_TRACER, server_call_tracer, nullptr);
}
}
+ MutexLock lock(mu());
ScopedContext activity_context(this);
- Spawn("server_promise",
- channel()->channel_stack()->MakeServerCallPromise(
- CallArgs{nullptr, ClientInitialMetadataOutstandingToken::Empty(),
- nullptr, nullptr, nullptr}),
- [this](ServerMetadataHandle result) { Finish(std::move(result)); });
+ promise_ = channel()->channel_stack()->MakeServerCallPromise(
+ CallArgs{nullptr, nullptr, nullptr, nullptr});
}
-void ServerPromiseBasedCall::Finish(ServerMetadataHandle result) {
+Poll<ServerMetadataHandle> ServerPromiseBasedCall::PollTopOfCall() {
if (grpc_call_trace.enabled()) {
- gpr_log(GPR_INFO, "%s[call] Finish: recv_close_state:%s result:%s",
- DebugTag().c_str(), recv_close_op_cancel_state_.ToString().c_str(),
- result->DebugString().c_str());
+ gpr_log(GPR_INFO, "%s[call] PollTopOfCall: %s%s%s", DebugTag().c_str(),
+ cancel_send_and_receive_ ? "force-" : "",
+ send_trailing_metadata_ != nullptr
+ ? absl::StrCat("send-metadata:",
+ send_trailing_metadata_->DebugString(), " ")
+ .c_str()
+ : " ",
+ PollStateDebugString().c_str());
}
- if (recv_close_op_cancel_state_.CompleteCallWithCancelledSetTo(
- result->get(GrpcCallWasCancelled()).value_or(true))) {
- FinishOpOnCompletion(&recv_close_completion_,
- PendingOp::kReceiveCloseOnServer);
+
+ if (cancel_send_and_receive_) {
+ CancelSendMessage();
+ CancelRecvMessage();
}
- if (server_initial_metadata_ != nullptr) {
- server_initial_metadata_->Close();
+
+ PollSendMessage();
+ PollRecvMessage(incoming_compression_algorithm_);
+
+ if (!is_sending() && send_trailing_metadata_ != nullptr) {
+ server_to_client_messages_->Close();
+ return std::move(send_trailing_metadata_);
}
- channelz::ServerNode* channelz_node = server_->channelz_node();
- if (channelz_node != nullptr) {
- if (result->get(GrpcStatusMetadata()).value_or(GRPC_STATUS_UNKNOWN) ==
- GRPC_STATUS_OK) {
- channelz_node->RecordCallSucceeded();
- } else {
- channelz_node->RecordCallFailed();
+
+ return Pending{};
+}
+
+void ServerPromiseBasedCall::UpdateOnce() {
+ if (grpc_call_trace.enabled()) {
+ gpr_log(
+ GPR_INFO, "%s[call] UpdateOnce: recv_close:%s%s %s%shas_promise=%s",
+ DebugTag().c_str(), recv_close_op_cancel_state_.ToString().c_str(),
+ recv_close_completion_.has_value()
+ ? absl::StrCat(":", CompletionString(recv_close_completion_))
+ .c_str()
+ : "",
+ send_status_from_server_completion_.has_value()
+ ? absl::StrCat(
+ "send_status:",
+ CompletionString(send_status_from_server_completion_), " ")
+ .c_str()
+ : "",
+ PollStateDebugString().c_str(),
+ promise_.has_value() ? "true" : "false");
+ }
+ if (auto* p =
+ absl::get_if<typename PipeSender<ServerMetadataHandle>::PushType>(
+ &send_initial_metadata_state_)) {
+ if ((*p)().ready()) {
+ send_initial_metadata_state_ = absl::monostate{};
}
}
- set_completed();
- ResetDeadline();
- PropagateCancellationToChildren();
+ if (promise_.has_value()) {
+ auto r = promise_();
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO, "%s[call] UpdateOnce: promise returns %s",
+ DebugTag().c_str(),
+ PollToString(r, [](const ServerMetadataHandle& h) {
+ return h->DebugString();
+ }).c_str());
+ }
+ if (auto* result = r.value_if_ready()) {
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO, "%s[call] UpdateOnce: GotResult %s result:%s",
+ DebugTag().c_str(),
+ recv_close_op_cancel_state_.ToString().c_str(),
+ (*result)->DebugString().c_str());
+ }
+ if (recv_close_op_cancel_state_.CompleteCall(
+ (*result)->get(GrpcStatusFromWire()).value_or(false))) {
+ FinishOpOnCompletion(&recv_close_completion_,
+ PendingOp::kReceiveCloseOnServer);
+ }
+ channelz::ServerNode* channelz_node = server_->channelz_node();
+ if (channelz_node != nullptr) {
+ if ((*result)
+ ->get(GrpcStatusMetadata())
+ .value_or(GRPC_STATUS_UNKNOWN) == GRPC_STATUS_OK) {
+ channelz_node->RecordCallSucceeded();
+ } else {
+ channelz_node->RecordCallFailed();
+ }
+ }
+ if (send_status_from_server_completion_.has_value()) {
+ FinishOpOnCompletion(&send_status_from_server_completion_,
+ PendingOp::kSendStatusFromServer);
+ }
+ CancelSendMessage();
+ CancelRecvMessage();
+ set_completed();
+ promise_ = ArenaPromise<ServerMetadataHandle>();
+ }
+ }
}
grpc_call_error ServerPromiseBasedCall::ValidateBatch(const grpc_op* ops,
@@ -3242,92 +3482,56 @@
void ServerPromiseBasedCall::CommitBatch(const grpc_op* ops, size_t nops,
const Completion& completion) {
- ScopedBatchCoalescer coalescer(this);
for (size_t op_idx = 0; op_idx < nops; op_idx++) {
const grpc_op& op = ops[op_idx];
switch (op.op) {
case GRPC_OP_SEND_INITIAL_METADATA: {
- auto metadata = arena()->MakePooled<ServerMetadata>(arena());
- PrepareOutgoingInitialMetadata(op, *metadata);
- CToMetadata(op.data.send_initial_metadata.metadata,
- op.data.send_initial_metadata.count, metadata.get());
- if (grpc_call_trace.enabled()) {
- gpr_log(GPR_INFO, "%s[call] Send initial metadata",
- DebugTag().c_str());
+ // compression not implemented
+ GPR_ASSERT(
+ !op.data.send_initial_metadata.maybe_compression_level.is_set);
+ if (!completed()) {
+ auto metadata = arena()->MakePooled<ServerMetadata>(arena());
+ CToMetadata(op.data.send_initial_metadata.metadata,
+ op.data.send_initial_metadata.count, metadata.get());
+ if (grpc_call_trace.enabled()) {
+ gpr_log(GPR_INFO, "%s[call] Send initial metadata",
+ DebugTag().c_str());
+ }
+ auto* pipe = absl::get<PipeSender<ServerMetadataHandle>*>(
+ send_initial_metadata_state_);
+ send_initial_metadata_state_ = pipe->Push(std::move(metadata));
}
- QueueSend();
- Spawn(
- "call_send_initial_metadata",
- [this, metadata = std::move(metadata)]() mutable {
- EnactSend();
- return server_initial_metadata_->Push(std::move(metadata));
- },
- [this,
- completion = AddOpToCompletion(
- completion, PendingOp::kSendInitialMetadata)](bool r) mutable {
- if (!r) FailCompletion(completion);
- FinishOpOnCompletion(&completion,
- PendingOp::kSendInitialMetadata);
- });
} break;
case GRPC_OP_SEND_MESSAGE:
StartSendMessage(op, completion, server_to_client_messages_);
break;
case GRPC_OP_RECV_MESSAGE:
- if (cancelled_.load(std::memory_order_relaxed)) {
- FailCompletion(completion);
- break;
- }
- StartRecvMessage(
- op, completion, []() { return Empty{}; },
- client_to_server_messages_);
+ StartRecvMessage(op, completion, client_to_server_messages_);
break;
- case GRPC_OP_SEND_STATUS_FROM_SERVER: {
- auto metadata = arena()->MakePooled<ServerMetadata>(arena());
+ case GRPC_OP_SEND_STATUS_FROM_SERVER:
+ send_trailing_metadata_ = arena()->MakePooled<ServerMetadata>(arena());
CToMetadata(op.data.send_status_from_server.trailing_metadata,
op.data.send_status_from_server.trailing_metadata_count,
- metadata.get());
- metadata->Set(GrpcStatusMetadata(),
- op.data.send_status_from_server.status);
+ send_trailing_metadata_.get());
+ send_trailing_metadata_->Set(GrpcStatusMetadata(),
+ op.data.send_status_from_server.status);
if (auto* details = op.data.send_status_from_server.status_details) {
- metadata->Set(GrpcMessageMetadata(), Slice(CSliceRef(*details)));
+ send_trailing_metadata_->Set(GrpcMessageMetadata(),
+ Slice(CSliceRef(*details)));
}
- Spawn(
- "call_send_status_from_server",
- [this, metadata = std::move(metadata)]() mutable {
- bool r = true;
- if (send_trailing_metadata_.is_set()) {
- r = false;
- } else {
- send_trailing_metadata_.Set(std::move(metadata));
- }
- return Map(WaitForSendingStarted(), [this, r](Empty) {
- server_initial_metadata_->Close();
- server_to_client_messages_->Close();
- return r;
- });
- },
- [this, completion = AddOpToCompletion(
- completion, PendingOp::kSendStatusFromServer)](
- bool ok) mutable {
- if (!ok) FailCompletion(completion);
- FinishOpOnCompletion(&completion,
- PendingOp::kSendStatusFromServer);
- });
- } break;
+ send_status_from_server_completion_ =
+ AddOpToCompletion(completion, PendingOp::kSendStatusFromServer);
+ break;
case GRPC_OP_RECV_CLOSE_ON_SERVER:
if (grpc_call_trace.enabled()) {
gpr_log(GPR_INFO, "%s[call] StartBatch: RecvClose %s",
DebugTag().c_str(),
recv_close_op_cancel_state_.ToString().c_str());
}
- ForceCompletionSuccess(completion);
- recv_close_completion_ =
- AddOpToCompletion(completion, PendingOp::kReceiveCloseOnServer);
- if (recv_close_op_cancel_state_.ReceiveCloseOnServerOpStarted(
+ if (!recv_close_op_cancel_state_.ReceiveCloseOnServerOpStarted(
op.data.recv_close_on_server.cancelled)) {
- FinishOpOnCompletion(&recv_close_completion_,
- PendingOp::kReceiveCloseOnServer);
+ recv_close_completion_ =
+ AddOpToCompletion(completion, PendingOp::kReceiveCloseOnServer);
}
break;
case GRPC_OP_RECV_STATUS_ON_CLIENT:
@@ -3342,6 +3546,8 @@
size_t nops,
void* notify_tag,
bool is_notify_tag_closure) {
+ MutexLock lock(mu());
+ ScopedContext activity_context(this);
if (nops == 0) {
EndOpImmediately(cq(), notify_tag, is_notify_tag_closure);
return GRPC_CALL_OK;
@@ -3353,30 +3559,18 @@
Completion completion =
StartCompletion(notify_tag, is_notify_tag_closure, ops);
CommitBatch(ops, nops, completion);
+ Update();
FinishOpOnCompletion(&completion, PendingOp::kStartingBatch);
return GRPC_CALL_OK;
}
-void ServerPromiseBasedCall::CancelWithError(absl::Status error) {
- cancelled_.store(true, std::memory_order_relaxed);
- Spawn(
- "cancel_with_error",
- [this, error = std::move(error)]() {
- if (!send_trailing_metadata_.is_set()) {
- auto md = ServerMetadataFromStatus(error);
- md->Set(GrpcCallWasCancelled(), true);
- send_trailing_metadata_.Set(std::move(md));
- }
- if (server_to_client_messages_ != nullptr) {
- server_to_client_messages_->Close();
- }
- if (server_initial_metadata_ != nullptr) {
- server_initial_metadata_->Close();
- }
- return Empty{};
- },
- [](Empty) {});
+void ServerPromiseBasedCall::CancelWithErrorLocked(absl::Status error) {
+ if (!promise_.has_value()) return;
+ cancel_send_and_receive_ = true;
+ send_trailing_metadata_ = ServerMetadataFromStatus(error, arena());
+ ForceWakeup();
}
+
#endif
#ifdef GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_SERVER_CALL
@@ -3385,19 +3579,24 @@
CallArgs call_args, grpc_completion_queue* cq,
grpc_metadata_array* publish_initial_metadata,
absl::FunctionRef<void(grpc_call* call)> publish) {
- call_->SetCompletionQueue(cq);
+ call_->mu()->AssertHeld();
+ call_->SetCompletionQueueLocked(cq);
call_->server_to_client_messages_ = call_args.server_to_client_messages;
call_->client_to_server_messages_ = call_args.client_to_server_messages;
- call_->server_initial_metadata_ = call_args.server_initial_metadata;
+ call_->send_initial_metadata_state_ = call_args.server_initial_metadata;
+ call_->incoming_compression_algorithm_ =
+ call_args.client_initial_metadata->get(GrpcEncodingMetadata())
+ .value_or(GRPC_COMPRESS_NONE);
call_->client_initial_metadata_ =
std::move(call_args.client_initial_metadata);
- call_->ProcessIncomingInitialMetadata(*call_->client_initial_metadata_);
PublishMetadataArray(call_->client_initial_metadata_.get(),
publish_initial_metadata);
call_->ExternalRef();
publish(call_->c_ptr());
- return Seq(call_->server_to_client_messages_->AwaitClosed(),
- call_->send_trailing_metadata_.Wait());
+ return [this]() {
+ call_->mu()->AssertHeld();
+ return call_->PollTopOfCall();
+ };
}
#else
ArenaPromise<ServerMetadataHandle>
@@ -3500,9 +3699,7 @@
}
uint32_t grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call* call) {
- return grpc_core::Call::FromC(call)
- ->encodings_accepted_by_peer()
- .ToLegacyBitmask();
+ return grpc_core::Call::FromC(call)->test_only_encodings_accepted_by_peer();
}
grpc_core::Arena* grpc_call_get_arena(grpc_call* call) {
@@ -3551,9 +3748,7 @@
grpc_compression_algorithm grpc_call_compression_for_level(
grpc_call* call, grpc_compression_level level) {
- return grpc_core::Call::FromC(call)
- ->encodings_accepted_by_peer()
- .CompressionAlgorithmForLevel(level);
+ return grpc_core::Call::FromC(call)->compression_for_level(level);
}
bool grpc_call_is_trailers_only(const grpc_call* call) {
diff --git a/src/core/lib/surface/call.h b/src/core/lib/surface/call.h
index 61176bb..ea4d393 100644
--- a/src/core/lib/surface/call.h
+++ b/src/core/lib/surface/call.h
@@ -119,11 +119,6 @@
// TODO(ctiller): remove this once transport APIs are promise based
void Unref(const char* reason = "call_context");
- RefCountedPtr<CallContext> Ref() {
- IncrementRefCount();
- return RefCountedPtr<CallContext>(this);
- }
-
grpc_call_stats* call_stats() { return &call_stats_; }
gpr_atm* peer_string_atm_ptr();
grpc_polling_entity* polling_entity() { return &pollent_; }
diff --git a/src/core/lib/surface/lame_client.cc b/src/core/lib/surface/lame_client.cc
index 7fbdf8e..ecbc9ee 100644
--- a/src/core/lib/surface/lame_client.cc
+++ b/src/core/lib/surface/lame_client.cc
@@ -79,7 +79,6 @@
if (args.server_to_client_messages != nullptr) {
args.server_to_client_messages->Close();
}
- args.client_initial_metadata_outstanding.Complete(true);
return Immediate(ServerMetadataFromStatus(error_));
}
diff --git a/src/core/lib/transport/batch_builder.cc b/src/core/lib/transport/batch_builder.cc
deleted file mode 100644
index 06d8c0a..0000000
--- a/src/core/lib/transport/batch_builder.cc
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2023 gRPC authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <grpc/support/port_platform.h>
-
-#include "src/core/lib/transport/batch_builder.h"
-
-#include <type_traits>
-
-#include "src/core/lib/promise/poll.h"
-#include "src/core/lib/slice/slice.h"
-#include "src/core/lib/surface/call_trace.h"
-#include "src/core/lib/transport/metadata_batch.h"
-#include "src/core/lib/transport/transport.h"
-#include "src/core/lib/transport/transport_impl.h"
-
-namespace grpc_core {
-
-BatchBuilder::BatchBuilder(grpc_transport_stream_op_batch_payload* payload)
- : payload_(payload) {}
-
-void BatchBuilder::PendingCompletion::CompletionCallback(
- void* self, grpc_error_handle error) {
- auto* pc = static_cast<PendingCompletion*>(self);
- auto* party = pc->batch->party.get();
- if (grpc_call_trace.enabled()) {
- gpr_log(
- GPR_DEBUG, "%s[connected] Finish batch-component %s for %s: status=%s",
- party->DebugTag().c_str(), std::string(pc->name()).c_str(),
- grpc_transport_stream_op_batch_string(&pc->batch->batch, false).c_str(),
- error.ToString().c_str());
- }
- party->Spawn(
- "batch-completion",
- [pc, error = std::move(error)]() mutable {
- RefCountedPtr<Batch> batch = std::exchange(pc->batch, nullptr);
- pc->done_latch.Set(std::move(error));
- return Empty{};
- },
- [](Empty) {});
-}
-
-BatchBuilder::PendingCompletion::PendingCompletion(RefCountedPtr<Batch> batch)
- : batch(std::move(batch)) {
- GRPC_CLOSURE_INIT(&on_done_closure, CompletionCallback, this, nullptr);
-}
-
-BatchBuilder::Batch::Batch(grpc_transport_stream_op_batch_payload* payload,
- grpc_stream_refcount* stream_refcount)
- : party(static_cast<Party*>(Activity::current())->Ref()),
- stream_refcount(stream_refcount) {
- batch.payload = payload;
- batch.is_traced = GetContext<CallContext>()->traced();
-#ifndef NDEBUG
- grpc_stream_ref(stream_refcount, "pending-batch");
-#else
- grpc_stream_ref(stream_refcount);
-#endif
-}
-
-BatchBuilder::Batch::~Batch() {
- auto* arena = party->arena();
- if (pending_receive_message != nullptr) {
- arena->DeletePooled(pending_receive_message);
- }
- if (pending_receive_initial_metadata != nullptr) {
- arena->DeletePooled(pending_receive_initial_metadata);
- }
- if (pending_receive_trailing_metadata != nullptr) {
- arena->DeletePooled(pending_receive_trailing_metadata);
- }
- if (pending_sends != nullptr) {
- arena->DeletePooled(pending_sends);
- }
- if (batch.cancel_stream) {
- arena->DeletePooled(batch.payload);
- }
-#ifndef NDEBUG
- grpc_stream_unref(stream_refcount, "pending-batch");
-#else
- grpc_stream_unref(stream_refcount);
-#endif
-}
-
-BatchBuilder::Batch* BatchBuilder::GetBatch(Target target) {
- if (target_.has_value() &&
- (target_->stream != target.stream ||
- target.transport->vtable
- ->hacky_disable_stream_op_batch_coalescing_in_connected_channel)) {
- FlushBatch();
- }
- if (!target_.has_value()) {
- target_ = target;
- batch_ = GetContext<Arena>()->NewPooled<Batch>(payload_,
- target_->stream_refcount);
- }
- GPR_ASSERT(batch_ != nullptr);
- return batch_;
-}
-
-void BatchBuilder::FlushBatch() {
- GPR_ASSERT(batch_ != nullptr);
- GPR_ASSERT(target_.has_value());
- if (grpc_call_trace.enabled()) {
- gpr_log(
- GPR_DEBUG, "%s[connected] Perform transport stream op batch: %p %s",
- batch_->party->DebugTag().c_str(), &batch_->batch,
- grpc_transport_stream_op_batch_string(&batch_->batch, false).c_str());
- }
- std::exchange(batch_, nullptr)->PerformWith(*target_);
- target_.reset();
-}
-
-void BatchBuilder::Batch::PerformWith(Target target) {
- grpc_transport_perform_stream_op(target.transport, target.stream, &batch);
-}
-
-ServerMetadataHandle BatchBuilder::CompleteSendServerTrailingMetadata(
- ServerMetadataHandle sent_metadata, absl::Status send_result,
- bool actually_sent) {
- if (!send_result.ok()) {
- if (grpc_call_trace.enabled()) {
- gpr_log(GPR_DEBUG,
- "%s[connected] Send metadata failed with error: %s, "
- "fabricating trailing metadata",
- Activity::current()->DebugTag().c_str(),
- send_result.ToString().c_str());
- }
- sent_metadata->Clear();
- sent_metadata->Set(GrpcStatusMetadata(),
- static_cast<grpc_status_code>(send_result.code()));
- sent_metadata->Set(GrpcMessageMetadata(),
- Slice::FromCopiedString(send_result.message()));
- sent_metadata->Set(GrpcCallWasCancelled(), true);
- }
- if (!sent_metadata->get(GrpcCallWasCancelled()).has_value()) {
- if (grpc_call_trace.enabled()) {
- gpr_log(
- GPR_DEBUG,
- "%s[connected] Tagging trailing metadata with "
- "cancellation status from transport: %s",
- Activity::current()->DebugTag().c_str(),
- actually_sent ? "sent => not-cancelled" : "not-sent => cancelled");
- }
- sent_metadata->Set(GrpcCallWasCancelled(), !actually_sent);
- }
- return sent_metadata;
-}
-
-BatchBuilder::Batch* BatchBuilder::MakeCancel(
- grpc_stream_refcount* stream_refcount, absl::Status status) {
- auto* arena = GetContext<Arena>();
- auto* payload =
- arena->NewPooled<grpc_transport_stream_op_batch_payload>(nullptr);
- auto* batch = arena->NewPooled<Batch>(payload, stream_refcount);
- batch->batch.cancel_stream = true;
- payload->cancel_stream.cancel_error = std::move(status);
- return batch;
-}
-
-void BatchBuilder::Cancel(Target target, absl::Status status) {
- auto* batch = MakeCancel(target.stream_refcount, std::move(status));
- batch->batch.on_complete = NewClosure(
- [batch](absl::Status) { batch->party->arena()->DeletePooled(batch); });
- batch->PerformWith(target);
-}
-
-} // namespace grpc_core
diff --git a/src/core/lib/transport/batch_builder.h b/src/core/lib/transport/batch_builder.h
deleted file mode 100644
index 5b0056a..0000000
--- a/src/core/lib/transport/batch_builder.h
+++ /dev/null
@@ -1,468 +0,0 @@
-// Copyright 2023 gRPC authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GRPC_SRC_CORE_LIB_TRANSPORT_BATCH_BUILDER_H
-#define GRPC_SRC_CORE_LIB_TRANSPORT_BATCH_BUILDER_H
-
-#include <grpc/support/port_platform.h>
-
-#include <stdint.h>
-
-#include <memory>
-#include <string>
-#include <utility>
-
-#include "absl/status/status.h"
-#include "absl/status/statusor.h"
-#include "absl/strings/string_view.h"
-#include "absl/types/optional.h"
-
-#include <grpc/status.h>
-#include <grpc/support/log.h>
-
-#include "src/core/lib/channel/channel_stack.h"
-#include "src/core/lib/debug/trace.h"
-#include "src/core/lib/gprpp/ref_counted_ptr.h"
-#include "src/core/lib/gprpp/status_helper.h"
-#include "src/core/lib/iomgr/closure.h"
-#include "src/core/lib/iomgr/error.h"
-#include "src/core/lib/promise/activity.h"
-#include "src/core/lib/promise/context.h"
-#include "src/core/lib/promise/latch.h"
-#include "src/core/lib/promise/map.h"
-#include "src/core/lib/promise/party.h"
-#include "src/core/lib/resource_quota/arena.h"
-#include "src/core/lib/slice/slice_buffer.h"
-#include "src/core/lib/surface/call.h"
-#include "src/core/lib/surface/call_trace.h"
-#include "src/core/lib/transport/metadata_batch.h"
-#include "src/core/lib/transport/transport.h"
-#include "src/core/lib/transport/transport_fwd.h"
-
-namespace grpc_core {
-
-// Build up a transport stream op batch for a stream for a promise based
-// connected channel.
-// Offered as a context from Call, so that it can collect ALL the updates during
-// a single party round, and then push them down to the transport as a single
-// transaction.
-class BatchBuilder {
- public:
- explicit BatchBuilder(grpc_transport_stream_op_batch_payload* payload);
- ~BatchBuilder() {
- if (batch_ != nullptr) FlushBatch();
- }
-
- struct Target {
- grpc_transport* transport;
- grpc_stream* stream;
- grpc_stream_refcount* stream_refcount;
- };
-
- BatchBuilder(const BatchBuilder&) = delete;
- BatchBuilder& operator=(const BatchBuilder&) = delete;
-
- // Returns a promise that will resolve to a Status when the send is completed.
- auto SendMessage(Target target, MessageHandle message);
-
- // Returns a promise that will resolve to a Status when the send is completed.
- auto SendClientInitialMetadata(Target target, ClientMetadataHandle metadata);
-
- // Returns a promise that will resolve to a Status when the send is completed.
- auto SendClientTrailingMetadata(Target target);
-
- // Returns a promise that will resolve to a Status when the send is completed.
- auto SendServerInitialMetadata(Target target, ServerMetadataHandle metadata);
-
- // Returns a promise that will resolve to a ServerMetadataHandle when the send
- // is completed.
- //
- // If convert_to_cancellation is true, then the status will be converted to a
- // cancellation batch instead of a trailing metadata op in a coalesced batch.
- //
- // This quirk exists as in the filter based stack upon which our transports
- // were written if a trailing metadata op were sent it always needed to be
- // paired with an initial op batch, and the transports would wait for the
- // initial metadata batch to arrive (in case of reordering up the stack).
- auto SendServerTrailingMetadata(Target target, ServerMetadataHandle metadata,
- bool convert_to_cancellation);
-
- // Returns a promise that will resolve to a StatusOr<optional<MessageHandle>>
- // when a message is received.
- // Error => non-ok status
- // End of stream => Ok, nullopt (no message)
- // Message => Ok, message
- auto ReceiveMessage(Target target);
-
- // Returns a promise that will resolve to a StatusOr<ClientMetadataHandle>
- // when the receive is complete.
- auto ReceiveClientInitialMetadata(Target target);
-
- // Returns a promise that will resolve to a StatusOr<ClientMetadataHandle>
- // when the receive is complete.
- auto ReceiveClientTrailingMetadata(Target target);
-
- // Returns a promise that will resolve to a StatusOr<ServerMetadataHandle>
- // when the receive is complete.
- auto ReceiveServerInitialMetadata(Target target);
-
- // Returns a promise that will resolve to a StatusOr<ServerMetadataHandle>
- // when the receive is complete.
- auto ReceiveServerTrailingMetadata(Target target);
-
- // Send a cancellation: does not occupy the same payload, nor does it
- // coalesce with other ops.
- void Cancel(Target target, absl::Status status);
-
- private:
- struct Batch;
-
- // Base pending operation
- struct PendingCompletion {
- explicit PendingCompletion(RefCountedPtr<Batch> batch);
- virtual absl::string_view name() const = 0;
- static void CompletionCallback(void* self, grpc_error_handle error);
- grpc_closure on_done_closure;
- Latch<absl::Status> done_latch;
- RefCountedPtr<Batch> batch;
-
- protected:
- ~PendingCompletion() = default;
- };
-
- // A pending receive message.
- struct PendingReceiveMessage final : public PendingCompletion {
- using PendingCompletion::PendingCompletion;
-
- absl::string_view name() const override { return "receive_message"; }
-
- MessageHandle IntoMessageHandle() {
- return GetContext<Arena>()->MakePooled<Message>(std::move(*payload),
- flags);
- }
-
- absl::optional<SliceBuffer> payload;
- uint32_t flags;
- };
-
- // A pending receive metadata.
- struct PendingReceiveMetadata : public PendingCompletion {
- using PendingCompletion::PendingCompletion;
-
- Arena::PoolPtr<grpc_metadata_batch> metadata =
- GetContext<Arena>()->MakePooled<grpc_metadata_batch>(
- GetContext<Arena>());
-
- protected:
- ~PendingReceiveMetadata() = default;
- };
-
- struct PendingReceiveInitialMetadata final : public PendingReceiveMetadata {
- using PendingReceiveMetadata::PendingReceiveMetadata;
- absl::string_view name() const override {
- return "receive_initial_metadata";
- }
- };
-
- struct PendingReceiveTrailingMetadata final : public PendingReceiveMetadata {
- using PendingReceiveMetadata::PendingReceiveMetadata;
- absl::string_view name() const override {
- return "receive_trailing_metadata";
- }
- };
-
- // Pending sends in a batch
- struct PendingSends final : public PendingCompletion {
- using PendingCompletion::PendingCompletion;
-
- absl::string_view name() const override { return "sends"; }
-
- MessageHandle send_message;
- Arena::PoolPtr<grpc_metadata_batch> send_initial_metadata;
- Arena::PoolPtr<grpc_metadata_batch> send_trailing_metadata;
- bool trailing_metadata_sent = false;
- };
-
- // One outstanding batch.
- struct Batch final {
- Batch(grpc_transport_stream_op_batch_payload* payload,
- grpc_stream_refcount* stream_refcount);
- ~Batch();
- Batch(const Batch&) = delete;
- Batch& operator=(const Batch&) = delete;
- void IncrementRefCount() { ++refs; }
- void Unref() {
- if (--refs == 0) party->arena()->DeletePooled(this);
- }
- RefCountedPtr<Batch> Ref() {
- IncrementRefCount();
- return RefCountedPtr<Batch>(this);
- }
- // Get an initialized pending completion.
- // There are four pending completions potentially contained within a batch.
- // They can be rather large so we don't create all of them always. Instead,
- // we dynamically create them on the arena as needed.
- // This method either returns the existing completion in a batch if that
- // completion has already been initialized, or it creates a new completion
- // and returns that.
- template <typename T>
- T* GetInitializedCompletion(T*(Batch::*field)) {
- if (this->*field != nullptr) return this->*field;
- this->*field = party->arena()->NewPooled<T>(Ref());
- if (grpc_call_trace.enabled()) {
- gpr_log(GPR_DEBUG, "%s[connected] Add batch closure for %s @ %s",
- Activity::current()->DebugTag().c_str(),
- std::string((this->*field)->name()).c_str(),
- (this->*field)->on_done_closure.DebugString().c_str());
- }
- return this->*field;
- }
- // grpc_transport_perform_stream_op on target.stream
- void PerformWith(Target target);
- // Take a promise, and return a promise that holds a ref on this batch until
- // the promise completes or is cancelled.
- template <typename P>
- auto RefUntil(P promise) {
- return [self = Ref(), promise = std::move(promise)]() mutable {
- return promise();
- };
- }
-
- grpc_transport_stream_op_batch batch;
- PendingReceiveMessage* pending_receive_message = nullptr;
- PendingReceiveInitialMetadata* pending_receive_initial_metadata = nullptr;
- PendingReceiveTrailingMetadata* pending_receive_trailing_metadata = nullptr;
- PendingSends* pending_sends = nullptr;
- const RefCountedPtr<Party> party;
- grpc_stream_refcount* const stream_refcount;
- uint8_t refs = 0;
- };
-
- // Get a batch for the given target.
- // Currently: if the current batch is for this target, return it - otherwise
- // flush the batch and start a new one (and return that).
- // This function may change in the future to allow multiple batches to be
- // building at once (if that turns out to be useful for hedging).
- Batch* GetBatch(Target target);
- // Flush the current batch down to the transport.
- void FlushBatch();
- // Create a cancel batch with its own payload.
- Batch* MakeCancel(grpc_stream_refcount* stream_refcount, absl::Status status);
-
- // Note: we don't distinguish between client and server metadata here.
- // At the time of writing they're both the same thing - and it's unclear
- // whether we'll get to separate them prior to batches going away or not.
- // So for now we claim YAGNI and just do the simplest possible implementation.
- auto SendInitialMetadata(Target target,
- Arena::PoolPtr<grpc_metadata_batch> md);
- auto ReceiveInitialMetadata(Target target);
- auto ReceiveTrailingMetadata(Target target);
-
- // Combine send status and server metadata into a final status to report back
- // to the containing call.
- static ServerMetadataHandle CompleteSendServerTrailingMetadata(
- ServerMetadataHandle sent_metadata, absl::Status send_result,
- bool actually_sent);
-
- grpc_transport_stream_op_batch_payload* const payload_;
- absl::optional<Target> target_;
- Batch* batch_ = nullptr;
-};
-
-inline auto BatchBuilder::SendMessage(Target target, MessageHandle message) {
- if (grpc_call_trace.enabled()) {
- gpr_log(GPR_DEBUG, "%s[connected] Queue send message: %s",
- Activity::current()->DebugTag().c_str(),
- message->DebugString().c_str());
- }
- auto* batch = GetBatch(target);
- auto* pc = batch->GetInitializedCompletion(&Batch::pending_sends);
- batch->batch.on_complete = &pc->on_done_closure;
- batch->batch.send_message = true;
- payload_->send_message.send_message = message->payload();
- payload_->send_message.flags = message->flags();
- pc->send_message = std::move(message);
- return batch->RefUntil(pc->done_latch.WaitAndCopy());
-}
-
-inline auto BatchBuilder::SendInitialMetadata(
- Target target, Arena::PoolPtr<grpc_metadata_batch> md) {
- if (grpc_call_trace.enabled()) {
- gpr_log(GPR_DEBUG, "%s[connected] Queue send initial metadata: %s",
- Activity::current()->DebugTag().c_str(), md->DebugString().c_str());
- }
- auto* batch = GetBatch(target);
- auto* pc = batch->GetInitializedCompletion(&Batch::pending_sends);
- batch->batch.on_complete = &pc->on_done_closure;
- batch->batch.send_initial_metadata = true;
- payload_->send_initial_metadata.send_initial_metadata = md.get();
- pc->send_initial_metadata = std::move(md);
- return batch->RefUntil(pc->done_latch.WaitAndCopy());
-}
-
-inline auto BatchBuilder::SendClientInitialMetadata(
- Target target, ClientMetadataHandle metadata) {
- return SendInitialMetadata(target, std::move(metadata));
-}
-
-inline auto BatchBuilder::SendClientTrailingMetadata(Target target) {
- if (grpc_call_trace.enabled()) {
- gpr_log(GPR_DEBUG, "%s[connected] Queue send trailing metadata",
- Activity::current()->DebugTag().c_str());
- }
- auto* batch = GetBatch(target);
- auto* pc = batch->GetInitializedCompletion(&Batch::pending_sends);
- batch->batch.on_complete = &pc->on_done_closure;
- batch->batch.send_trailing_metadata = true;
- auto metadata =
- GetContext<Arena>()->MakePooled<grpc_metadata_batch>(GetContext<Arena>());
- payload_->send_trailing_metadata.send_trailing_metadata = metadata.get();
- payload_->send_trailing_metadata.sent = nullptr;
- pc->send_trailing_metadata = std::move(metadata);
- return batch->RefUntil(pc->done_latch.WaitAndCopy());
-}
-
-inline auto BatchBuilder::SendServerInitialMetadata(
- Target target, ServerMetadataHandle metadata) {
- return SendInitialMetadata(target, std::move(metadata));
-}
-
-inline auto BatchBuilder::SendServerTrailingMetadata(
- Target target, ServerMetadataHandle metadata,
- bool convert_to_cancellation) {
- if (grpc_call_trace.enabled()) {
- gpr_log(GPR_DEBUG, "%s[connected] %s: %s",
- Activity::current()->DebugTag().c_str(),
- convert_to_cancellation ? "Send trailing metadata as cancellation"
- : "Queue send trailing metadata",
- metadata->DebugString().c_str());
- }
- Batch* batch;
- PendingSends* pc;
- if (convert_to_cancellation) {
- const auto status_code =
- metadata->get(GrpcStatusMetadata()).value_or(GRPC_STATUS_UNKNOWN);
- auto status = grpc_error_set_int(
- absl::Status(static_cast<absl::StatusCode>(status_code),
- metadata->GetOrCreatePointer(GrpcMessageMetadata())
- ->as_string_view()),
- StatusIntProperty::kRpcStatus, status_code);
- batch = MakeCancel(target.stream_refcount, std::move(status));
- pc = batch->GetInitializedCompletion(&Batch::pending_sends);
- } else {
- batch = GetBatch(target);
- pc = batch->GetInitializedCompletion(&Batch::pending_sends);
- batch->batch.send_trailing_metadata = true;
- payload_->send_trailing_metadata.send_trailing_metadata = metadata.get();
- payload_->send_trailing_metadata.sent = &pc->trailing_metadata_sent;
- }
- batch->batch.on_complete = &pc->on_done_closure;
- pc->send_trailing_metadata = std::move(metadata);
- auto promise = batch->RefUntil(
- Map(pc->done_latch.WaitAndCopy(), [pc](absl::Status status) {
- return CompleteSendServerTrailingMetadata(
- std::move(pc->send_trailing_metadata), std::move(status),
- pc->trailing_metadata_sent);
- }));
- if (convert_to_cancellation) {
- batch->PerformWith(target);
- }
- return promise;
-}
-
-inline auto BatchBuilder::ReceiveMessage(Target target) {
- if (grpc_call_trace.enabled()) {
- gpr_log(GPR_DEBUG, "%s[connected] Queue receive message",
- Activity::current()->DebugTag().c_str());
- }
- auto* batch = GetBatch(target);
- auto* pc = batch->GetInitializedCompletion(&Batch::pending_receive_message);
- batch->batch.recv_message = true;
- payload_->recv_message.recv_message_ready = &pc->on_done_closure;
- payload_->recv_message.recv_message = &pc->payload;
- payload_->recv_message.flags = &pc->flags;
- return batch->RefUntil(
- Map(pc->done_latch.Wait(),
- [pc](absl::Status status)
- -> absl::StatusOr<absl::optional<MessageHandle>> {
- if (!status.ok()) return status;
- if (!pc->payload.has_value()) return absl::nullopt;
- return pc->IntoMessageHandle();
- }));
-}
-
-inline auto BatchBuilder::ReceiveInitialMetadata(Target target) {
- if (grpc_call_trace.enabled()) {
- gpr_log(GPR_DEBUG, "%s[connected] Queue receive initial metadata",
- Activity::current()->DebugTag().c_str());
- }
- auto* batch = GetBatch(target);
- auto* pc =
- batch->GetInitializedCompletion(&Batch::pending_receive_initial_metadata);
- batch->batch.recv_initial_metadata = true;
- payload_->recv_initial_metadata.recv_initial_metadata_ready =
- &pc->on_done_closure;
- payload_->recv_initial_metadata.recv_initial_metadata = pc->metadata.get();
- return batch->RefUntil(
- Map(pc->done_latch.Wait(),
- [pc](absl::Status status) -> absl::StatusOr<ClientMetadataHandle> {
- if (!status.ok()) return status;
- return std::move(pc->metadata);
- }));
-}
-
-inline auto BatchBuilder::ReceiveClientInitialMetadata(Target target) {
- return ReceiveInitialMetadata(target);
-}
-
-inline auto BatchBuilder::ReceiveServerInitialMetadata(Target target) {
- return ReceiveInitialMetadata(target);
-}
-
-inline auto BatchBuilder::ReceiveTrailingMetadata(Target target) {
- if (grpc_call_trace.enabled()) {
- gpr_log(GPR_DEBUG, "%s[connected] Queue receive trailing metadata",
- Activity::current()->DebugTag().c_str());
- }
- auto* batch = GetBatch(target);
- auto* pc = batch->GetInitializedCompletion(
- &Batch::pending_receive_trailing_metadata);
- batch->batch.recv_trailing_metadata = true;
- payload_->recv_trailing_metadata.recv_trailing_metadata_ready =
- &pc->on_done_closure;
- payload_->recv_trailing_metadata.recv_trailing_metadata = pc->metadata.get();
- payload_->recv_trailing_metadata.collect_stats =
- &GetContext<CallContext>()->call_stats()->transport_stream_stats;
- return batch->RefUntil(
- Map(pc->done_latch.Wait(),
- [pc](absl::Status status) -> absl::StatusOr<ServerMetadataHandle> {
- if (!status.ok()) return status;
- return std::move(pc->metadata);
- }));
-}
-
-inline auto BatchBuilder::ReceiveClientTrailingMetadata(Target target) {
- return ReceiveTrailingMetadata(target);
-}
-
-inline auto BatchBuilder::ReceiveServerTrailingMetadata(Target target) {
- return ReceiveTrailingMetadata(target);
-}
-
-template <>
-struct ContextType<BatchBuilder> {};
-
-} // namespace grpc_core
-
-#endif // GRPC_SRC_CORE_LIB_TRANSPORT_BATCH_BUILDER_H
diff --git a/src/core/lib/transport/metadata_batch.h b/src/core/lib/transport/metadata_batch.h
index 493a9f9..5b4f6d9 100644
--- a/src/core/lib/transport/metadata_batch.h
+++ b/src/core/lib/transport/metadata_batch.h
@@ -441,15 +441,6 @@
static absl::string_view DisplayValue(bool x) { return x ? "true" : "false"; }
};
-// Annotation to denote that this call qualifies for cancelled=1 for the
-// RECV_CLOSE_ON_SERVER op
-struct GrpcCallWasCancelled {
- static absl::string_view DebugKey() { return "GrpcCallWasCancelled"; }
- static constexpr bool kRepeatable = false;
- using ValueType = bool;
- static absl::string_view DisplayValue(bool x) { return x ? "true" : "false"; }
-};
-
// Annotation added by client surface code to denote wait-for-ready state
struct WaitForReady {
struct ValueType {
@@ -1387,8 +1378,7 @@
// Non-encodable things
grpc_core::GrpcStreamNetworkState, grpc_core::PeerString,
grpc_core::GrpcStatusContext, grpc_core::GrpcStatusFromWire,
- grpc_core::GrpcCallWasCancelled, grpc_core::WaitForReady,
- grpc_core::GrpcTrailersOnly>;
+ grpc_core::WaitForReady, grpc_core::GrpcTrailersOnly>;
struct grpc_metadata_batch : public grpc_metadata_batch_base {
using grpc_metadata_batch_base::grpc_metadata_batch_base;
diff --git a/src/core/lib/transport/transport.cc b/src/core/lib/transport/transport.cc
index 055d298..6e2e48a 100644
--- a/src/core/lib/transport/transport.cc
+++ b/src/core/lib/transport/transport.cc
@@ -26,17 +26,13 @@
#include <new>
#include "absl/status/status.h"
-#include "absl/strings/str_cat.h"
#include <grpc/event_engine/event_engine.h>
-#include <grpc/grpc.h>
#include "src/core/lib/event_engine/default_event_engine.h"
#include "src/core/lib/gpr/alloc.h"
-#include "src/core/lib/gprpp/time.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/slice/slice.h"
-#include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/transport/transport_impl.h"
grpc_core::DebugOnlyTraceFlag grpc_trace_stream_refcount(false,
@@ -275,35 +271,11 @@
ServerMetadataHandle ServerMetadataFromStatus(const absl::Status& status,
Arena* arena) {
auto hdl = arena->MakePooled<ServerMetadata>(arena);
- grpc_status_code code;
- std::string message;
- grpc_error_get_status(status, Timestamp::InfFuture(), &code, &message,
- nullptr, nullptr);
- hdl->Set(GrpcStatusMetadata(), code);
+ hdl->Set(GrpcStatusMetadata(), static_cast<grpc_status_code>(status.code()));
if (!status.ok()) {
- hdl->Set(GrpcMessageMetadata(), Slice::FromCopiedString(message));
+ hdl->Set(GrpcMessageMetadata(), Slice::FromCopiedString(status.message()));
}
return hdl;
}
-std::string Message::DebugString() const {
- std::string out = absl::StrCat(payload_.Length(), "b");
- auto flags = flags_;
- auto explain = [&flags, &out](uint32_t flag, absl::string_view name) {
- if (flags & flag) {
- flags &= ~flag;
- absl::StrAppend(&out, ":", name);
- }
- };
- explain(GRPC_WRITE_BUFFER_HINT, "write_buffer");
- explain(GRPC_WRITE_NO_COMPRESS, "no_compress");
- explain(GRPC_WRITE_THROUGH, "write_through");
- explain(GRPC_WRITE_INTERNAL_COMPRESS, "compress");
- explain(GRPC_WRITE_INTERNAL_TEST_ONLY_WAS_COMPRESSED, "was_compressed");
- if (flags != 0) {
- absl::StrAppend(&out, ":huh=0x", absl::Hex(flags));
- }
- return out;
-}
-
} // namespace grpc_core
diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h
index d74aa47..af5611e 100644
--- a/src/core/lib/transport/transport.h
+++ b/src/core/lib/transport/transport.h
@@ -27,7 +27,6 @@
#include <functional>
#include <string>
-#include <type_traits>
#include <utility>
#include "absl/status/status.h"
@@ -54,7 +53,6 @@
#include "src/core/lib/promise/arena_promise.h"
#include "src/core/lib/promise/context.h"
#include "src/core/lib/promise/detail/status.h"
-#include "src/core/lib/promise/latch.h"
#include "src/core/lib/promise/pipe.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/slice/slice_buffer.h"
@@ -107,8 +105,6 @@
SliceBuffer* payload() { return &payload_; }
const SliceBuffer* payload() const { return &payload_; }
- std::string DebugString() const;
-
private:
SliceBuffer payload_;
uint32_t flags_ = 0;
@@ -147,70 +143,11 @@
}
};
-// Move only type that tracks call startup.
-// Allows observation of when client_initial_metadata has been processed by the
-// end of the local call stack.
-// Interested observers can call Wait() to obtain a promise that will resolve
-// when all local client_initial_metadata processing has completed.
-// The result of this token is either true on successful completion, or false
-// if the metadata was not sent.
-// To set a successful completion, call Complete(true). For failure, call
-// Complete(false).
-// If Complete is not called, the destructor of a still held token will complete
-// with failure.
-// Transports should hold this token until client_initial_metadata has passed
-// any flow control (eg MAX_CONCURRENT_STREAMS for http2).
-class ClientInitialMetadataOutstandingToken {
- public:
- static ClientInitialMetadataOutstandingToken Empty() {
- return ClientInitialMetadataOutstandingToken();
- }
- static ClientInitialMetadataOutstandingToken New(
- Arena* arena = GetContext<Arena>()) {
- ClientInitialMetadataOutstandingToken token;
- token.latch_ = arena->New<Latch<bool>>();
- return token;
- }
-
- ClientInitialMetadataOutstandingToken(
- const ClientInitialMetadataOutstandingToken&) = delete;
- ClientInitialMetadataOutstandingToken& operator=(
- const ClientInitialMetadataOutstandingToken&) = delete;
- ClientInitialMetadataOutstandingToken(
- ClientInitialMetadataOutstandingToken&& other) noexcept
- : latch_(std::exchange(other.latch_, nullptr)) {}
- ClientInitialMetadataOutstandingToken& operator=(
- ClientInitialMetadataOutstandingToken&& other) noexcept {
- latch_ = std::exchange(other.latch_, nullptr);
- return *this;
- }
- ~ClientInitialMetadataOutstandingToken() {
- if (latch_ != nullptr) latch_->Set(false);
- }
- void Complete(bool success) { std::exchange(latch_, nullptr)->Set(success); }
-
- // Returns a promise that will resolve when this object (or its moved-from
- // ancestor) is dropped.
- auto Wait() { return latch_->Wait(); }
-
- private:
- ClientInitialMetadataOutstandingToken() = default;
-
- Latch<bool>* latch_ = nullptr;
-};
-
-using ClientInitialMetadataOutstandingTokenWaitType =
- decltype(std::declval<ClientInitialMetadataOutstandingToken>().Wait());
-
struct CallArgs {
// Initial metadata from the client to the server.
// During promise setup this can be manipulated by filters (and then
// passed on to the next filter).
ClientMetadataHandle client_initial_metadata;
- // Token indicating that client_initial_metadata is still being processed.
- // This should be moved around and only destroyed when the transport is
- // satisfied that the metadata has passed any flow control measures it has.
- ClientInitialMetadataOutstandingToken client_initial_metadata_outstanding;
// Initial metadata from the server to the client.
// Set once when it's available.
// During promise setup filters can substitute their own latch for this
@@ -393,12 +330,6 @@
/// Is this stream traced
bool is_traced : 1;
- bool HasOp() const {
- return send_initial_metadata || send_trailing_metadata || send_message ||
- recv_initial_metadata || recv_message || recv_trailing_metadata ||
- cancel_stream;
- }
-
//**************************************************************************
// remaining fields are initialized and used at the discretion of the
// current handler of the op
diff --git a/src/core/lib/transport/transport_impl.h b/src/core/lib/transport/transport_impl.h
index 0f5eece..d6d9122 100644
--- a/src/core/lib/transport/transport_impl.h
+++ b/src/core/lib/transport/transport_impl.h
@@ -38,13 +38,6 @@
// layers and initialized by the transport
size_t sizeof_stream; // = sizeof(transport stream)
- // HACK: inproc does not handle stream op batch callbacks correctly (receive
- // ops are required to complete prior to on_complete triggering).
- // This flag is used to disable coalescing of batches in connected_channel for
- // that specific transport.
- // TODO(ctiller): This ought not be necessary once we have promises complete.
- bool hacky_disable_stream_op_batch_coalescing_in_connected_channel;
-
// name of this transport implementation
const char* name;
diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py
index ccff0cb..83b622e 100644
--- a/src/python/grpcio/grpc_core_dependencies.py
+++ b/src/python/grpcio/grpc_core_dependencies.py
@@ -661,7 +661,6 @@
'src/core/lib/load_balancing/lb_policy_registry.cc',
'src/core/lib/matchers/matchers.cc',
'src/core/lib/promise/activity.cc',
- 'src/core/lib/promise/party.cc',
'src/core/lib/promise/sleep.cc',
'src/core/lib/promise/trace.cc',
'src/core/lib/resolver/resolver.cc',
@@ -765,7 +764,6 @@
'src/core/lib/surface/server.cc',
'src/core/lib/surface/validate_metadata.cc',
'src/core/lib/surface/version.cc',
- 'src/core/lib/transport/batch_builder.cc',
'src/core/lib/transport/bdp_estimator.cc',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/error_utils.cc',
diff --git a/test/core/end2end/cq_verifier.cc b/test/core/end2end/cq_verifier.cc
index 0be24f5..16b7551 100644
--- a/test/core/end2end/cq_verifier.cc
+++ b/test/core/end2end/cq_verifier.cc
@@ -28,16 +28,13 @@
#include <utility>
#include <vector>
-#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
-#include "absl/strings/string_view.h"
#include <grpc/byte_buffer.h>
#include <grpc/compression.h>
#include <grpc/grpc.h>
-#include <grpc/slice.h>
#include <grpc/slice_buffer.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
@@ -123,35 +120,6 @@
ok = GRPC_SLICE_LENGTH(a) == GRPC_SLICE_LENGTH(b) &&
0 == memcmp(GRPC_SLICE_START_PTR(a), GRPC_SLICE_START_PTR(b),
GRPC_SLICE_LENGTH(a));
- if (!ok) {
- gpr_log(GPR_ERROR,
- "SLICE MISMATCH: left_length=%" PRIuPTR " right_length=%" PRIuPTR,
- GRPC_SLICE_LENGTH(a), GRPC_SLICE_LENGTH(b));
- std::string out;
- const char* a_str = reinterpret_cast<const char*>(GRPC_SLICE_START_PTR(a));
- const char* b_str = reinterpret_cast<const char*>(GRPC_SLICE_START_PTR(b));
- for (size_t i = 0; i < std::max(GRPC_SLICE_LENGTH(a), GRPC_SLICE_LENGTH(b));
- i++) {
- if (i >= GRPC_SLICE_LENGTH(a)) {
- absl::StrAppend(&out, "\u001b[36m", // cyan
- absl::CEscape(absl::string_view(&b_str[i], 1)),
- "\u001b[0m");
- } else if (i >= GRPC_SLICE_LENGTH(b)) {
- absl::StrAppend(&out, "\u001b[35m", // magenta
- absl::CEscape(absl::string_view(&a_str[i], 1)),
- "\u001b[0m");
- } else if (a_str[i] == b_str[i]) {
- absl::StrAppend(&out, absl::CEscape(absl::string_view(&a_str[i], 1)));
- } else {
- absl::StrAppend(&out, "\u001b[31m", // red
- absl::CEscape(absl::string_view(&a_str[i], 1)),
- "\u001b[33m", // yellow
- absl::CEscape(absl::string_view(&b_str[i], 1)),
- "\u001b[0m");
- }
- gpr_log(GPR_ERROR, "%s", out.c_str());
- }
- }
grpc_slice_unref(a);
grpc_slice_unref(b);
return ok;
diff --git a/test/core/end2end/fixtures/proxy.cc b/test/core/end2end/fixtures/proxy.cc
index 8e99c5b..c1b7da1 100644
--- a/test/core/end2end/fixtures/proxy.cc
+++ b/test/core/end2end/fixtures/proxy.cc
@@ -210,7 +210,7 @@
grpc_op op;
grpc_call_error err;
- grpc_byte_buffer_destroy(std::exchange(pc->c2p_msg, nullptr));
+ grpc_byte_buffer_destroy(pc->c2p_msg);
if (!pc->proxy->shutdown && success) {
op.op = GRPC_OP_RECV_MESSAGE;
op.flags = 0;
diff --git a/test/core/end2end/tests/filter_init_fails.cc b/test/core/end2end/tests/filter_init_fails.cc
index 753d7cd..ddae01f 100644
--- a/test/core/end2end/tests/filter_init_fails.cc
+++ b/test/core/end2end/tests/filter_init_fails.cc
@@ -42,10 +42,7 @@
#include "src/core/lib/gprpp/status_helper.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/error.h"
-#include "src/core/lib/promise/arena_promise.h"
-#include "src/core/lib/promise/promise.h"
#include "src/core/lib/surface/channel_stack_type.h"
-#include "src/core/lib/transport/transport.h"
#include "test/core/end2end/cq_verifier.h"
#include "test/core/end2end/end2end_tests.h"
#include "test/core/util/test_config.h"
@@ -400,23 +397,12 @@
static void destroy_channel_elem(grpc_channel_element* /*elem*/) {}
static const grpc_channel_filter test_filter = {
- grpc_call_next_op,
- [](grpc_channel_element*, grpc_core::CallArgs,
- grpc_core::NextPromiseFactory)
- -> grpc_core::ArenaPromise<grpc_core::ServerMetadataHandle> {
- return grpc_core::Immediate(grpc_core::ServerMetadataFromStatus(
- absl::PermissionDeniedError("access denied")));
- },
- grpc_channel_next_op,
- 0,
- init_call_elem,
- grpc_call_stack_ignore_set_pollset_or_pollset_set,
- destroy_call_elem,
- 0,
- init_channel_elem,
- grpc_channel_stack_no_post_init,
- destroy_channel_elem,
- grpc_channel_next_get_info,
+ grpc_call_next_op, nullptr,
+ grpc_channel_next_op, 0,
+ init_call_elem, grpc_call_stack_ignore_set_pollset_or_pollset_set,
+ destroy_call_elem, 0,
+ init_channel_elem, grpc_channel_stack_no_post_init,
+ destroy_channel_elem, grpc_channel_next_get_info,
"filter_init_fails"};
//******************************************************************************
diff --git a/test/core/end2end/tests/max_message_length.cc b/test/core/end2end/tests/max_message_length.cc
index e57cc94..3127db5 100644
--- a/test/core/end2end/tests/max_message_length.cc
+++ b/test/core/end2end/tests/max_message_length.cc
@@ -82,9 +82,6 @@
grpc_status_code status;
grpc_call_error error;
grpc_slice details;
- grpc_slice expect_in_details = grpc_slice_from_copied_string(
- send_limit ? "Sent message larger than max (11 vs. 5)"
- : "Received message larger than max (11 vs. 5)");
int was_cancelled = 2;
grpc_channel_args* client_args = nullptr;
@@ -223,10 +220,13 @@
done:
GPR_ASSERT(status == GRPC_STATUS_RESOURCE_EXHAUSTED);
- GPR_ASSERT(grpc_slice_slice(details, expect_in_details) >= 0);
+ GPR_ASSERT(
+ grpc_slice_str_cmp(
+ details, send_limit
+ ? "Sent message larger than max (11 vs. 5)"
+ : "Received message larger than max (11 vs. 5)") == 0);
grpc_slice_unref(details);
- grpc_slice_unref(expect_in_details);
grpc_metadata_array_destroy(&initial_metadata_recv);
grpc_metadata_array_destroy(&trailing_metadata_recv);
grpc_metadata_array_destroy(&request_metadata_recv);
@@ -265,9 +265,6 @@
grpc_status_code status;
grpc_call_error error;
grpc_slice details;
- grpc_slice expect_in_details = grpc_slice_from_copied_string(
- send_limit ? "Sent message larger than max (11 vs. 5)"
- : "Received message larger than max (11 vs. 5)");
int was_cancelled = 2;
grpc_channel_args* client_args = nullptr;
@@ -407,10 +404,13 @@
GPR_ASSERT(0 == grpc_slice_str_cmp(call_details.method, "/service/method"));
GPR_ASSERT(status == GRPC_STATUS_RESOURCE_EXHAUSTED);
- GPR_ASSERT(grpc_slice_slice(details, expect_in_details) >= 0);
+ GPR_ASSERT(
+ grpc_slice_str_cmp(
+ details, send_limit
+ ? "Sent message larger than max (11 vs. 5)"
+ : "Received message larger than max (11 vs. 5)") == 0);
grpc_slice_unref(details);
- grpc_slice_unref(expect_in_details);
grpc_metadata_array_destroy(&initial_metadata_recv);
grpc_metadata_array_destroy(&trailing_metadata_recv);
grpc_metadata_array_destroy(&request_metadata_recv);
diff --git a/test/core/filters/client_auth_filter_test.cc b/test/core/filters/client_auth_filter_test.cc
index e0711c4..12f0e25 100644
--- a/test/core/filters/client_auth_filter_test.cc
+++ b/test/core/filters/client_auth_filter_test.cc
@@ -154,8 +154,7 @@
auto promise = filter->MakeCallPromise(
CallArgs{ClientMetadataHandle(&initial_metadata_batch_,
Arena::PooledDeleter(nullptr)),
- ClientInitialMetadataOutstandingToken::Empty(), nullptr, nullptr,
- nullptr},
+ nullptr, nullptr, nullptr},
[&](CallArgs /*call_args*/) {
return ArenaPromise<ServerMetadataHandle>(
[&]() -> Poll<ServerMetadataHandle> {
@@ -184,8 +183,7 @@
auto promise = filter->MakeCallPromise(
CallArgs{ClientMetadataHandle(&initial_metadata_batch_,
Arena::PooledDeleter(nullptr)),
- ClientInitialMetadataOutstandingToken::Empty(), nullptr, nullptr,
- nullptr},
+ nullptr, nullptr, nullptr},
[&](CallArgs /*call_args*/) {
return ArenaPromise<ServerMetadataHandle>(
[&]() -> Poll<ServerMetadataHandle> {
diff --git a/test/core/filters/client_authority_filter_test.cc b/test/core/filters/client_authority_filter_test.cc
index df2656e..ae86f71 100644
--- a/test/core/filters/client_authority_filter_test.cc
+++ b/test/core/filters/client_authority_filter_test.cc
@@ -71,8 +71,7 @@
auto promise = filter.MakeCallPromise(
CallArgs{ClientMetadataHandle(&initial_metadata_batch,
Arena::PooledDeleter(nullptr)),
- ClientInitialMetadataOutstandingToken::Empty(), nullptr, nullptr,
- nullptr},
+ nullptr, nullptr, nullptr},
[&](CallArgs call_args) {
EXPECT_EQ(call_args.client_initial_metadata
->get_pointer(HttpAuthorityMetadata())
@@ -107,8 +106,7 @@
auto promise = filter.MakeCallPromise(
CallArgs{ClientMetadataHandle(&initial_metadata_batch,
Arena::PooledDeleter(nullptr)),
- ClientInitialMetadataOutstandingToken::Empty(), nullptr, nullptr,
- nullptr},
+ nullptr, nullptr, nullptr},
[&](CallArgs call_args) {
EXPECT_EQ(call_args.client_initial_metadata
->get_pointer(HttpAuthorityMetadata())
diff --git a/test/core/filters/filter_fuzzer.cc b/test/core/filters/filter_fuzzer.cc
index ebf9853..fe0ccda 100644
--- a/test/core/filters/filter_fuzzer.cc
+++ b/test/core/filters/filter_fuzzer.cc
@@ -110,8 +110,6 @@
const grpc_transport_vtable kFakeTransportVTable = {
// sizeof_stream
0,
- // hacky_disable_stream_op_batch_coalescing_in_connected_channel
- false,
// name
"fake_transport",
// init_stream
@@ -404,16 +402,16 @@
public:
WakeCall(MainLoop* main_loop, uint32_t id)
: main_loop_(main_loop), id_(id) {}
- void Wakeup(WakeupMask) override {
+ void Wakeup(void*) override {
for (const uint32_t already : main_loop_->wakeups_) {
if (already == id_) return;
}
main_loop_->wakeups_.push_back(id_);
delete this;
}
- void Drop(WakeupMask) override { delete this; }
+ void Drop(void*) override { delete this; }
- std::string ActivityDebugTag(WakeupMask) const override {
+ std::string ActivityDebugTag(void*) const override {
return "WakeCall(" + std::to_string(id_) + ")";
}
@@ -478,7 +476,6 @@
auto* server_initial_metadata = arena_->New<Pipe<ServerMetadataHandle>>();
CallArgs call_args{std::move(*LoadMetadata(client_initial_metadata,
&client_initial_metadata_)),
- ClientInitialMetadataOutstandingToken::Empty(),
&server_initial_metadata->sender, nullptr, nullptr};
if (is_client) {
promise_ = main_loop_->channel_stack_->MakeClientCallPromise(
@@ -527,9 +524,9 @@
}
void Orphan() override { abort(); }
- void ForceImmediateRepoll(WakeupMask) override { context_->set_continue(); }
+ void ForceImmediateRepoll() override { context_->set_continue(); }
Waker MakeOwningWaker() override {
- return Waker(new WakeCall(main_loop_, id_), 0);
+ return Waker(new WakeCall(main_loop_, id_), nullptr);
}
Waker MakeNonOwningWaker() override { return MakeOwningWaker(); }
diff --git a/test/core/gprpp/ref_counted_test.cc b/test/core/gprpp/ref_counted_test.cc
index 0d58d13..990acf2 100644
--- a/test/core/gprpp/ref_counted_test.cc
+++ b/test/core/gprpp/ref_counted_test.cc
@@ -53,7 +53,7 @@
foo->Unref();
}
-class Value : public RefCounted<Value, PolymorphicRefCount, UnrefNoDelete> {
+class Value : public RefCounted<Value, PolymorphicRefCount, kUnrefNoDelete> {
public:
Value(int value, std::set<std::unique_ptr<Value>>* registry) : value_(value) {
registry->emplace(this);
@@ -108,7 +108,7 @@
class ValueInExternalAllocation
: public RefCounted<ValueInExternalAllocation, PolymorphicRefCount,
- UnrefCallDtor> {
+ kUnrefCallDtor> {
public:
explicit ValueInExternalAllocation(int value) : value_(value) {}
diff --git a/test/core/gprpp/thd_test.cc b/test/core/gprpp/thd_test.cc
index be10ca1..7561965 100644
--- a/test/core/gprpp/thd_test.cc
+++ b/test/core/gprpp/thd_test.cc
@@ -20,8 +20,6 @@
#include "src/core/lib/gprpp/thd.h"
-#include <atomic>
-
#include "gtest/gtest.h"
#include <grpc/support/sync.h>
@@ -51,7 +49,7 @@
}
// Test that we can create a number of threads, wait for them, and join them.
-TEST(ThreadTest, CanCreateWaitAndJoin) {
+static void test1(void) {
grpc_core::Thread thds[NUM_THREADS];
struct test t;
gpr_mu_init(&t.mu);
@@ -78,7 +76,7 @@
static void thd_body2(void* /*v*/) {}
// Test that we can create a number of threads and join them.
-TEST(ThreadTest, CanCreateSomeAndJoinThem) {
+static void test2(void) {
grpc_core::Thread thds[NUM_THREADS];
for (auto& th : thds) {
bool ok;
@@ -91,23 +89,11 @@
}
}
-// Test that we can create a thread with an AnyInvocable.
-TEST(ThreadTest, CanCreateWithAnyInvocable) {
- grpc_core::Thread thds[NUM_THREADS];
- std::atomic<int> count_run{0};
- for (auto& th : thds) {
- bool ok;
- th = grpc_core::Thread(
- "grpc_thread_body2_test",
- [&count_run]() { count_run.fetch_add(1, std::memory_order_relaxed); },
- &ok);
- ASSERT_TRUE(ok);
- th.Start();
- }
- for (auto& th : thds) {
- th.Join();
- }
- EXPECT_EQ(count_run.load(std::memory_order_relaxed), NUM_THREADS);
+// -------------------------------------------------
+
+TEST(ThdTest, MainTest) {
+ test1();
+ test2();
}
int main(int argc, char** argv) {
diff --git a/test/core/promise/BUILD b/test/core/promise/BUILD
index baaade6..313276e 100644
--- a/test/core/promise/BUILD
+++ b/test/core/promise/BUILD
@@ -127,10 +127,7 @@
# is.
name = "promise_map_test",
srcs = ["map_test.cc"],
- external_deps = [
- "absl/functional:any_invocable",
- "gtest",
- ],
+ external_deps = ["gtest"],
language = "c++",
tags = ["promise_test"],
uses_event_engine = False,
@@ -167,6 +164,7 @@
uses_event_engine = False,
uses_polling = False,
deps = [
+ "//:promise",
"//src/core:poll",
"//src/core:promise_factory",
],
@@ -310,6 +308,25 @@
)
grpc_cc_test(
+ name = "observable_test",
+ srcs = ["observable_test.cc"],
+ external_deps = [
+ "absl/status",
+ "gtest",
+ ],
+ language = "c++",
+ tags = ["promise_test"],
+ uses_event_engine = False,
+ uses_polling = False,
+ deps = [
+ "test_wakeup_schedulers",
+ "//:promise",
+ "//src/core:observable",
+ "//src/core:seq",
+ ],
+)
+
+grpc_cc_test(
name = "for_each_test",
srcs = ["for_each_test.cc"],
external_deps = ["gtest"],
@@ -368,7 +385,6 @@
name = "pipe_test",
srcs = ["pipe_test.cc"],
external_deps = [
- "absl/functional:function_ref",
"absl/status",
"gtest",
],
@@ -378,7 +394,6 @@
uses_polling = False,
deps = [
"test_wakeup_schedulers",
- "//:gpr",
"//:grpc",
"//:ref_counted_ptr",
"//src/core:activity",
@@ -417,7 +432,6 @@
srcs = ["promise_fuzzer.cc"],
corpus = "promise_fuzzer_corpus",
external_deps = [
- "absl/functional:any_invocable",
"absl/status",
"absl/types:optional",
],
@@ -512,6 +526,7 @@
"//:exec_ctx",
"//:gpr",
"//:grpc_unsecure",
+ "//:orphanable",
"//:ref_counted_ptr",
"//src/core:1999",
"//src/core:context",
diff --git a/test/core/promise/if_test.cc b/test/core/promise/if_test.cc
index 9965fa0..0a2ccc5 100644
--- a/test/core/promise/if_test.cc
+++ b/test/core/promise/if_test.cc
@@ -14,6 +14,8 @@
#include "src/core/lib/promise/if.h"
+#include <utility>
+
#include "gtest/gtest.h"
namespace grpc_core {
diff --git a/test/core/promise/latch_test.cc b/test/core/promise/latch_test.cc
index 3b18efc..07ab6ad 100644
--- a/test/core/promise/latch_test.cc
+++ b/test/core/promise/latch_test.cc
@@ -52,33 +52,6 @@
[&on_done](absl::Status status) { on_done.Call(std::move(status)); });
}
-TEST(LatchTest, WaitAndCopyWorks) {
- Latch<std::string> latch;
- StrictMock<MockFunction<void(absl::Status)>> on_done;
- EXPECT_CALL(on_done, Call(absl::OkStatus()));
- MakeActivity(
- [&latch] {
- return Seq(Join(latch.WaitAndCopy(), latch.WaitAndCopy(),
- [&latch]() {
- latch.Set(
- "Once a jolly swagman camped by a billabong, "
- "under the shade of a coolibah tree.");
- return true;
- }),
- [](std::tuple<std::string, std::string, bool> result) {
- EXPECT_EQ(std::get<0>(result),
- "Once a jolly swagman camped by a billabong, "
- "under the shade of a coolibah tree.");
- EXPECT_EQ(std::get<1>(result),
- "Once a jolly swagman camped by a billabong, "
- "under the shade of a coolibah tree.");
- return absl::OkStatus();
- });
- },
- NoWakeupScheduler(),
- [&on_done](absl::Status status) { on_done.Call(std::move(status)); });
-}
-
TEST(LatchTest, Void) {
Latch<void> latch;
StrictMock<MockFunction<void(absl::Status)>> on_done;
@@ -96,23 +69,6 @@
[&on_done](absl::Status status) { on_done.Call(std::move(status)); });
}
-TEST(LatchTest, ExternallyObservableVoid) {
- ExternallyObservableLatch<void> latch;
- StrictMock<MockFunction<void(absl::Status)>> on_done;
- EXPECT_CALL(on_done, Call(absl::OkStatus()));
- MakeActivity(
- [&latch] {
- return Seq(Join(latch.Wait(),
- [&latch]() {
- latch.Set();
- return true;
- }),
- [](std::tuple<Empty, bool>) { return absl::OkStatus(); });
- },
- NoWakeupScheduler(),
- [&on_done](absl::Status status) { on_done.Call(std::move(status)); });
-}
-
} // namespace grpc_core
int main(int argc, char** argv) {
diff --git a/test/core/promise/loop_test.cc b/test/core/promise/loop_test.cc
index 30fa4d1..36beb00 100644
--- a/test/core/promise/loop_test.cc
+++ b/test/core/promise/loop_test.cc
@@ -14,8 +14,6 @@
#include "src/core/lib/promise/loop.h"
-#include <utility>
-
#include "gtest/gtest.h"
#include "src/core/lib/promise/detail/basic_seq.h"
@@ -51,20 +49,6 @@
EXPECT_EQ(x, Poll<int>(42));
}
-TEST(LoopTest, CanAccessFactoryLambdaVariables) {
- int i = 0;
- auto x = Loop([p = &i]() {
- return [q = &p]() -> Poll<LoopCtl<int>> {
- ++**q;
- return Pending{};
- };
- });
- auto y = std::move(x);
- auto z = std::move(y);
- z();
- EXPECT_EQ(i, 1);
-}
-
} // namespace grpc_core
int main(int argc, char** argv) {
diff --git a/test/core/promise/map_test.cc b/test/core/promise/map_test.cc
index eacc666..d266654 100644
--- a/test/core/promise/map_test.cc
+++ b/test/core/promise/map_test.cc
@@ -14,7 +14,8 @@
#include "src/core/lib/promise/map.h"
-#include "absl/functional/any_invocable.h"
+#include <functional>
+
#include "gtest/gtest.h"
#include "src/core/lib/promise/promise.h"
diff --git a/test/core/promise/mpsc_test.cc b/test/core/promise/mpsc_test.cc
index 0a724cd..8001793 100644
--- a/test/core/promise/mpsc_test.cc
+++ b/test/core/promise/mpsc_test.cc
@@ -36,14 +36,14 @@
public:
MOCK_METHOD(void, WakeupRequested, ());
- void ForceImmediateRepoll(WakeupMask) override { WakeupRequested(); }
+ void ForceImmediateRepoll() override { WakeupRequested(); }
void Orphan() override {}
- Waker MakeOwningWaker() override { return Waker(this, 0); }
- Waker MakeNonOwningWaker() override { return Waker(this, 0); }
- void Wakeup(WakeupMask) override { WakeupRequested(); }
- void Drop(WakeupMask) override {}
+ Waker MakeOwningWaker() override { return Waker(this, nullptr); }
+ Waker MakeNonOwningWaker() override { return Waker(this, nullptr); }
+ void Wakeup(void*) override { WakeupRequested(); }
+ void Drop(void*) override {}
std::string DebugTag() const override { return "MockActivity"; }
- std::string ActivityDebugTag(WakeupMask) const override { return DebugTag(); }
+ std::string ActivityDebugTag(void*) const override { return DebugTag(); }
void Activate() {
if (scoped_activity_ != nullptr) return;
diff --git a/test/core/promise/observable_test.cc b/test/core/promise/observable_test.cc
new file mode 100644
index 0000000..c4bb925
--- /dev/null
+++ b/test/core/promise/observable_test.cc
@@ -0,0 +1,134 @@
+// Copyright 2021 gRPC authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/core/lib/promise/observable.h"
+
+#include <functional>
+
+#include "absl/status/status.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+#include "src/core/lib/promise/promise.h"
+#include "src/core/lib/promise/seq.h"
+#include "test/core/promise/test_wakeup_schedulers.h"
+
+using testing::MockFunction;
+using testing::StrictMock;
+
+namespace grpc_core {
+
+// A simple Barrier type: stalls progress until it is 'cleared'.
+class Barrier {
+ public:
+ struct Result {};
+
+ Promise<Result> Wait() {
+ return [this]() -> Poll<Result> {
+ MutexLock lock(&mu_);
+ if (cleared_) {
+ return Result{};
+ } else {
+ return wait_set_.AddPending(Activity::current()->MakeOwningWaker());
+ }
+ };
+ }
+
+ void Clear() {
+ mu_.Lock();
+ cleared_ = true;
+ auto wakeup = wait_set_.TakeWakeupSet();
+ mu_.Unlock();
+ wakeup.Wakeup();
+ }
+
+ private:
+ Mutex mu_;
+ WaitSet wait_set_ ABSL_GUARDED_BY(mu_);
+ bool cleared_ ABSL_GUARDED_BY(mu_) = false;
+};
+
+TEST(ObservableTest, CanPushAndGet) {
+ StrictMock<MockFunction<void(absl::Status)>> on_done;
+ Observable<int> observable;
+ auto observer = observable.MakeObserver();
+ auto activity = MakeActivity(
+ [&observer]() {
+ return Seq(observer.Get(), [](absl::optional<int> i) {
+ return i == 42 ? absl::OkStatus() : absl::UnknownError("expected 42");
+ });
+ },
+ InlineWakeupScheduler(),
+ [&on_done](absl::Status status) { on_done.Call(std::move(status)); });
+ EXPECT_CALL(on_done, Call(absl::OkStatus()));
+ observable.Push(42);
+}
+
+TEST(ObservableTest, CanNext) {
+ StrictMock<MockFunction<void(absl::Status)>> on_done;
+ Observable<int> observable;
+ auto observer = observable.MakeObserver();
+ auto activity = MakeActivity(
+ [&observer]() {
+ return Seq(
+ observer.Get(),
+ [&observer](absl::optional<int> i) {
+ EXPECT_EQ(i, 42);
+ return observer.Next();
+ },
+ [](absl::optional<int> i) {
+ return i == 1 ? absl::OkStatus()
+ : absl::UnknownError("expected 1");
+ });
+ },
+ InlineWakeupScheduler(),
+ [&on_done](absl::Status status) { on_done.Call(std::move(status)); });
+ observable.Push(42);
+ EXPECT_CALL(on_done, Call(absl::OkStatus()));
+ observable.Push(1);
+}
+
+TEST(ObservableTest, CanWatch) {
+ StrictMock<MockFunction<void(absl::Status)>> on_done;
+ Observable<int> observable;
+ Barrier barrier;
+ auto activity = MakeActivity(
+ [&observable, &barrier]() {
+ return observable.Watch(
+ [&barrier](int x,
+ WatchCommitter* committer) -> Promise<absl::Status> {
+ if (x == 3) {
+ committer->Commit();
+ return Seq(barrier.Wait(), Immediate(absl::OkStatus()));
+ } else {
+ return Never<absl::Status>();
+ }
+ });
+ },
+ InlineWakeupScheduler(),
+ [&on_done](absl::Status status) { on_done.Call(std::move(status)); });
+ observable.Push(1);
+ observable.Push(2);
+ observable.Push(3);
+ observable.Push(4);
+ EXPECT_CALL(on_done, Call(absl::OkStatus()));
+ barrier.Clear();
+}
+
+} // namespace grpc_core
+
+int main(int argc, char** argv) {
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/test/core/promise/party_test.cc b/test/core/promise/party_test.cc
index e12e16d..bd0a6c2 100644
--- a/test/core/promise/party_test.cc
+++ b/test/core/promise/party_test.cc
@@ -28,6 +28,7 @@
#include "src/core/lib/event_engine/default_event_engine.h"
#include "src/core/lib/gprpp/notification.h"
+#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/gprpp/time.h"
@@ -43,34 +44,19 @@
class AllocatorOwner {
protected:
- ~AllocatorOwner() { arena_->Destroy(); }
MemoryAllocator memory_allocator_ = MemoryAllocator(
ResourceQuota::Default()->memory_quota()->CreateMemoryAllocator("test"));
- Arena* arena_ = Arena::Create(1024, &memory_allocator_);
};
class TestParty final : public AllocatorOwner, public Party {
public:
- TestParty() : Party(AllocatorOwner::arena_, 1) {}
- ~TestParty() override {}
+ TestParty() : Party(Arena::Create(1024, &memory_allocator_)) {}
std::string DebugTag() const override { return "TestParty"; }
- using Party::IncrementRefCount;
- using Party::Unref;
-
- bool RunParty() override {
+ void Run() override {
promise_detail::Context<grpc_event_engine::experimental::EventEngine>
ee_ctx(ee_.get());
- return Party::RunParty();
- }
-
- void PartyOver() override {
- {
- promise_detail::Context<grpc_event_engine::experimental::EventEngine>
- ee_ctx(ee_.get());
- CancelRemainingParticipants();
- }
- delete this;
+ Party::Run();
}
private:
@@ -82,17 +68,14 @@
protected:
};
-TEST_F(PartyTest, Noop) { auto party = MakeRefCounted<TestParty>(); }
+TEST_F(PartyTest, Noop) { auto party = MakeOrphanable<TestParty>(); }
TEST_F(PartyTest, CanSpawnAndRun) {
- auto party = MakeRefCounted<TestParty>();
+ auto party = MakeOrphanable<TestParty>();
bool done = false;
party->Spawn(
- "TestSpawn",
[i = 10]() mutable -> Poll<int> {
EXPECT_EQ(Activity::current()->DebugTag(), "TestParty");
- gpr_log(GPR_DEBUG, "i=%d", i);
- GPR_ASSERT(i > 0);
Activity::current()->ForceImmediateRepoll();
--i;
if (i == 0) return 42;
@@ -106,15 +89,13 @@
}
TEST_F(PartyTest, CanSpawnFromSpawn) {
- auto party = MakeRefCounted<TestParty>();
+ auto party = MakeOrphanable<TestParty>();
bool done1 = false;
bool done2 = false;
party->Spawn(
- "TestSpawn",
- [party, &done2]() -> Poll<int> {
+ [party = party.get(), &done2]() -> Poll<int> {
EXPECT_EQ(Activity::current()->DebugTag(), "TestParty");
party->Spawn(
- "TestSpawnInner",
[i = 10]() mutable -> Poll<int> {
EXPECT_EQ(Activity::current()->DebugTag(), "TestParty");
Activity::current()->ForceImmediateRepoll();
@@ -137,11 +118,10 @@
}
TEST_F(PartyTest, CanWakeupWithOwningWaker) {
- auto party = MakeRefCounted<TestParty>();
+ auto party = MakeOrphanable<TestParty>();
bool done = false;
Waker waker;
party->Spawn(
- "TestSpawn",
[i = 10, &waker]() mutable -> Poll<int> {
EXPECT_EQ(Activity::current()->DebugTag(), "TestParty");
waker = Activity::current()->MakeOwningWaker();
@@ -161,11 +141,10 @@
}
TEST_F(PartyTest, CanWakeupWithNonOwningWaker) {
- auto party = MakeRefCounted<TestParty>();
+ auto party = MakeOrphanable<TestParty>();
bool done = false;
Waker waker;
party->Spawn(
- "TestSpawn",
[i = 10, &waker]() mutable -> Poll<int> {
EXPECT_EQ(Activity::current()->DebugTag(), "TestParty");
waker = Activity::current()->MakeNonOwningWaker();
@@ -185,11 +164,10 @@
}
TEST_F(PartyTest, CanWakeupWithNonOwningWakerAfterOrphaning) {
- auto party = MakeRefCounted<TestParty>();
+ auto party = MakeOrphanable<TestParty>();
bool done = false;
Waker waker;
party->Spawn(
- "TestSpawn",
[i = 10, &waker]() mutable -> Poll<int> {
EXPECT_EQ(Activity::current()->DebugTag(), "TestParty");
waker = Activity::current()->MakeNonOwningWaker();
@@ -210,11 +188,10 @@
}
TEST_F(PartyTest, CanDropNonOwningWakeAfterOrphaning) {
- auto party = MakeRefCounted<TestParty>();
+ auto party = MakeOrphanable<TestParty>();
bool done = false;
std::unique_ptr<Waker> waker;
party->Spawn(
- "TestSpawn",
[i = 10, &waker]() mutable -> Poll<int> {
EXPECT_EQ(Activity::current()->DebugTag(), "TestParty");
waker =
@@ -234,11 +211,10 @@
}
TEST_F(PartyTest, CanWakeupNonOwningOrphanedWakerWithNoEffect) {
- auto party = MakeRefCounted<TestParty>();
+ auto party = MakeOrphanable<TestParty>();
bool done = false;
Waker waker;
party->Spawn(
- "TestSpawn",
[i = 10, &waker]() mutable -> Poll<int> {
EXPECT_EQ(Activity::current()->DebugTag(), "TestParty");
waker = Activity::current()->MakeNonOwningWaker();
@@ -259,16 +235,15 @@
}
TEST_F(PartyTest, ThreadStressTest) {
- auto party = MakeRefCounted<TestParty>();
+ auto party = MakeOrphanable<TestParty>();
std::vector<std::thread> threads;
- threads.reserve(8);
- for (int i = 0; i < 8; i++) {
- threads.emplace_back([party]() {
+ threads.reserve(16);
+ for (int i = 0; i < 16; i++) {
+ threads.emplace_back([party = party.get()]() {
for (int i = 0; i < 100; i++) {
ExecCtx ctx; // needed for Sleep
Notification promise_complete;
- party->Spawn("TestSpawn",
- Seq(Sleep(Timestamp::Now() + Duration::Milliseconds(10)),
+ party->Spawn(Seq(Sleep(Timestamp::Now() + Duration::Milliseconds(10)),
[]() -> Poll<int> { return 42; }),
[&promise_complete](int i) {
EXPECT_EQ(i, 42);
@@ -323,17 +298,16 @@
};
TEST_F(PartyTest, ThreadStressTestWithOwningWaker) {
- auto party = MakeRefCounted<TestParty>();
+ auto party = MakeOrphanable<TestParty>();
std::vector<std::thread> threads;
- threads.reserve(8);
- for (int i = 0; i < 8; i++) {
- threads.emplace_back([party]() {
+ threads.reserve(16);
+ for (int i = 0; i < 16; i++) {
+ threads.emplace_back([party = party.get()]() {
for (int i = 0; i < 100; i++) {
ExecCtx ctx; // needed for Sleep
PromiseNotification promise_start(true);
Notification promise_complete;
- party->Spawn("TestSpawn",
- Seq(promise_start.Wait(),
+ party->Spawn(Seq(promise_start.Wait(),
Sleep(Timestamp::Now() + Duration::Milliseconds(10)),
[]() -> Poll<int> { return 42; }),
[&promise_complete](int i) {
@@ -351,17 +325,16 @@
}
TEST_F(PartyTest, ThreadStressTestWithNonOwningWaker) {
- auto party = MakeRefCounted<TestParty>();
+ auto party = MakeOrphanable<TestParty>();
std::vector<std::thread> threads;
- threads.reserve(8);
- for (int i = 0; i < 8; i++) {
- threads.emplace_back([party]() {
+ threads.reserve(16);
+ for (int i = 0; i < 16; i++) {
+ threads.emplace_back([party = party.get()]() {
for (int i = 0; i < 100; i++) {
ExecCtx ctx; // needed for Sleep
PromiseNotification promise_start(false);
Notification promise_complete;
- party->Spawn("TestSpawn",
- Seq(promise_start.Wait(),
+ party->Spawn(Seq(promise_start.Wait(),
Sleep(Timestamp::Now() + Duration::Milliseconds(10)),
[]() -> Poll<int> { return 42; }),
[&promise_complete](int i) {
@@ -379,16 +352,15 @@
}
TEST_F(PartyTest, ThreadStressTestWithOwningWakerNoSleep) {
- auto party = MakeRefCounted<TestParty>();
+ auto party = MakeOrphanable<TestParty>();
std::vector<std::thread> threads;
- threads.reserve(8);
- for (int i = 0; i < 8; i++) {
- threads.emplace_back([party]() {
+ threads.reserve(16);
+ for (int i = 0; i < 16; i++) {
+ threads.emplace_back([party = party.get()]() {
for (int i = 0; i < 10000; i++) {
PromiseNotification promise_start(true);
Notification promise_complete;
party->Spawn(
- "TestSpawn",
Seq(promise_start.Wait(), []() -> Poll<int> { return 42; }),
[&promise_complete](int i) {
EXPECT_EQ(i, 42);
@@ -405,16 +377,15 @@
}
TEST_F(PartyTest, ThreadStressTestWithNonOwningWakerNoSleep) {
- auto party = MakeRefCounted<TestParty>();
+ auto party = MakeOrphanable<TestParty>();
std::vector<std::thread> threads;
- threads.reserve(8);
- for (int i = 0; i < 8; i++) {
- threads.emplace_back([party]() {
+ threads.reserve(16);
+ for (int i = 0; i < 16; i++) {
+ threads.emplace_back([party = party.get()]() {
for (int i = 0; i < 10000; i++) {
PromiseNotification promise_start(false);
Notification promise_complete;
party->Spawn(
- "TestSpawn",
Seq(promise_start.Wait(), []() -> Poll<int> { return 42; }),
[&promise_complete](int i) {
EXPECT_EQ(i, 42);
@@ -431,22 +402,20 @@
}
TEST_F(PartyTest, ThreadStressTestWithInnerSpawn) {
- auto party = MakeRefCounted<TestParty>();
+ auto party = MakeOrphanable<TestParty>();
std::vector<std::thread> threads;
threads.reserve(8);
for (int i = 0; i < 8; i++) {
- threads.emplace_back([party]() {
+ threads.emplace_back([party = party.get()]() {
for (int i = 0; i < 100; i++) {
ExecCtx ctx; // needed for Sleep
PromiseNotification inner_start(true);
PromiseNotification inner_complete(false);
Notification promise_complete;
party->Spawn(
- "TestSpawn",
Seq(
[party, &inner_start, &inner_complete]() -> Poll<int> {
- party->Spawn("TestSpawnInner",
- Seq(inner_start.Wait(), []() { return 0; }),
+ party->Spawn(Seq(inner_start.Wait(), []() { return 0; }),
[&inner_complete](int i) {
EXPECT_EQ(i, 0);
inner_complete.Notify();
diff --git a/test/core/promise/pipe_test.cc b/test/core/promise/pipe_test.cc
index 4613c40..e78b4ac 100644
--- a/test/core/promise/pipe_test.cc
+++ b/test/core/promise/pipe_test.cc
@@ -19,7 +19,6 @@
#include <tuple>
#include <utility>
-#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
@@ -27,7 +26,6 @@
#include <grpc/event_engine/memory_allocator.h>
#include <grpc/grpc.h>
-#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/promise/activity.h"
#include "src/core/lib/promise/detail/basic_join.h"
@@ -383,58 +381,6 @@
ASSERT_TRUE(*done);
}
-TEST_F(PipeTest, AwaitClosedWorks) {
- StrictMock<MockFunction<void(absl::Status)>> on_done;
- EXPECT_CALL(on_done, Call(absl::OkStatus()));
- MakeActivity(
- [] {
- auto* pipe = GetContext<Arena>()->ManagedNew<Pipe<int>>();
- pipe->sender.InterceptAndMap([](int value) { return value + 1; });
- return Seq(
- // Concurrently:
- // - wait for closed on both ends
- // - close the sender, which will signal the receiver to return an
- // end-of-stream.
- Join(pipe->receiver.AwaitClosed(), pipe->sender.AwaitClosed(),
- [pipe]() mutable {
- pipe->sender.Close();
- return absl::OkStatus();
- }),
- // Verify we received end-of-stream and closed the sender.
- [](std::tuple<bool, bool, absl::Status> result) {
- EXPECT_FALSE(std::get<0>(result));
- EXPECT_FALSE(std::get<1>(result));
- EXPECT_EQ(std::get<2>(result), absl::OkStatus());
- return absl::OkStatus();
- });
- },
- NoWakeupScheduler(),
- [&on_done](absl::Status status) { on_done.Call(std::move(status)); },
- MakeScopedArena(1024, &memory_allocator_));
-}
-
-class FakeActivity final : public Activity {
- public:
- void Orphan() override {}
- void ForceImmediateRepoll(WakeupMask) override {}
- Waker MakeOwningWaker() override { Crash("Not implemented"); }
- Waker MakeNonOwningWaker() override { Crash("Not implemented"); }
- void Run(absl::FunctionRef<void()> f) {
- ScopedActivity activity(this);
- f();
- }
-};
-
-TEST_F(PipeTest, PollAckWaitsForReadyClosed) {
- FakeActivity().Run([]() {
- pipe_detail::Center<int> c;
- int i = 1;
- EXPECT_EQ(c.Push(&i), Poll<bool>(true));
- c.MarkClosed();
- EXPECT_EQ(c.PollAck(), Poll<bool>(Pending{}));
- });
-}
-
} // namespace grpc_core
int main(int argc, char** argv) {
diff --git a/test/core/promise/promise_factory_test.cc b/test/core/promise/promise_factory_test.cc
index 3690d78..d822bf1 100644
--- a/test/core/promise/promise_factory_test.cc
+++ b/test/core/promise/promise_factory_test.cc
@@ -14,10 +14,13 @@
#include "src/core/lib/promise/detail/promise_factory.h"
+#include <functional>
+
#include "absl/functional/bind_front.h"
#include "gtest/gtest.h"
#include "src/core/lib/promise/poll.h"
+#include "src/core/lib/promise/promise.h"
namespace grpc_core {
namespace promise_detail {
@@ -40,12 +43,13 @@
return Poll<int>(Poll<int>(42));
}).Make()(),
Poll<int>(42));
- EXPECT_EQ(
- MakeOnceFactory<void>([]() { return Poll<int>(Poll<int>(42)); }).Make()(),
- Poll<int>(42));
- EXPECT_EQ(MakeRepeatedFactory<void>([]() {
+ EXPECT_EQ(MakeOnceFactory<void>(Promise<int>([]() {
return Poll<int>(Poll<int>(42));
- }).Make()(),
+ })).Make()(),
+ Poll<int>(42));
+ EXPECT_EQ(MakeRepeatedFactory<void>(Promise<int>([]() {
+ return Poll<int>(Poll<int>(42));
+ })).Make()(),
Poll<int>(42));
}
diff --git a/test/core/promise/promise_fuzzer.cc b/test/core/promise/promise_fuzzer.cc
index cd60e1a..c777e94 100644
--- a/test/core/promise/promise_fuzzer.cc
+++ b/test/core/promise/promise_fuzzer.cc
@@ -19,7 +19,6 @@
#include <utility>
#include <vector>
-#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/types/optional.h"
diff --git a/test/core/resource_quota/arena_test.cc b/test/core/resource_quota/arena_test.cc
index 6653643..4667ab5 100644
--- a/test/core/resource_quota/arena_test.cc
+++ b/test/core/resource_quota/arena_test.cc
@@ -22,7 +22,6 @@
#include <string.h>
#include <algorithm>
-#include <iosfwd>
#include <memory>
#include <ostream>
#include <string>
diff --git a/test/cpp/microbenchmarks/bm_call_create.cc b/test/cpp/microbenchmarks/bm_call_create.cc
index cdb948c..1f9d142 100644
--- a/test/cpp/microbenchmarks/bm_call_create.cc
+++ b/test/cpp/microbenchmarks/bm_call_create.cc
@@ -418,10 +418,17 @@
// implementation of grpc_transport_get_endpoint
grpc_endpoint* GetEndpoint(grpc_transport* /*self*/) { return nullptr; }
-static const grpc_transport_vtable phony_transport_vtable = {
- 0, false, "phony_http2", InitStream,
- nullptr, SetPollset, SetPollsetSet, PerformStreamOp,
- PerformOp, DestroyStream, Destroy, GetEndpoint};
+static const grpc_transport_vtable phony_transport_vtable = {0,
+ "phony_http2",
+ InitStream,
+ nullptr,
+ SetPollset,
+ SetPollsetSet,
+ PerformStreamOp,
+ PerformOp,
+ DestroyStream,
+ Destroy,
+ GetEndpoint};
static grpc_transport phony_transport = {&phony_transport_vtable};
diff --git a/tools/codegen/core/optimize_arena_pool_sizes.py b/tools/codegen/core/optimize_arena_pool_sizes.py
deleted file mode 100755
index bfae1c6..0000000
--- a/tools/codegen/core/optimize_arena_pool_sizes.py
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright 2023 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# USAGE:
-# Run some tests with the GRPC_ARENA_TRACE_POOLED_ALLOCATIONS #define turned on.
-# Capture the output to a text file.
-# Invoke this program with that as an argument, and let it work its magic.
-
-import collections
-import heapq
-import random
-import re
-import sys
-
-# A single allocation, negative size => free
-Allocation = collections.namedtuple('Allocation', 'size ptr')
-Active = collections.namedtuple('Active', 'id size')
-
-# Read through all the captures, and build up scrubbed traces
-arenas = []
-building = collections.defaultdict(list)
-active = {}
-biggest = 0
-smallest = 1024
-sizes = set()
-for filename in sys.argv[1:]:
- for line in open(filename):
- m = re.search(r'ARENA 0x([0-9a-f]+) ALLOC ([0-9]+) @ 0x([0-9a-f]+)',
- line)
- if m:
- size = int(m.group(2))
- if size > biggest:
- biggest = size
- if size < smallest:
- smallest = size
- active[m.group(3)] = Active(m.group(1), size)
- building[m.group(1)].append(size)
- sizes.add(size)
- m = re.search(r'FREE 0x([0-9a-f]+)', line)
- if m:
- # We may have spurious frees, so make sure there's an outstanding allocation
- last = active.pop(m.group(1), None)
- if last is not None:
- building[last.id].append(-last.size)
- m = re.search(r'DESTRUCT_ARENA 0x([0-9a-f]+)', line)
- if m:
- trace = building.pop(m.group(1), None)
- if trace:
- arenas.append(trace)
-
-
-# Given a list of pool sizes, return which bucket an allocation should go into
-def bucket(pool_sizes, size):
- for bucket in sorted(pool_sizes):
- if abs(size) <= bucket:
- return bucket
-
-
-# Given a list of pool sizes, determine the total outstanding bytes in the arena for once trace
-def outstanding_bytes(pool_sizes, trace):
- free_list = collections.defaultdict(int)
- allocated = 0
- for size in trace:
- b = bucket(pool_sizes, size)
- if size < 0:
- free_list[b] += 1
- else:
- if free_list[b] > 0:
- free_list[b] -= 1
- else:
- allocated += b
- return allocated + len(pool_sizes) * 8
-
-
-# Given a list of pool sizes, determine the maximum outstanding bytes for any seen trace
-def measure(pool_sizes):
- max_outstanding = 0
- for trace in arenas:
- max_outstanding = max(max_outstanding,
- outstanding_bytes(pool_sizes, trace))
- return max_outstanding
-
-
-ALWAYS_INCLUDE = 1024
-best = [ALWAYS_INCLUDE, biggest]
-best_measure = measure(best)
-
-testq = []
-step = 0
-
-
-def add(l):
- global testq, best_measure, best
- m = measure(l)
- if m < best_measure:
- best_measure = m
- best = l
- if l[-1] == smallest:
- return
- heapq.heappush(testq, (m, l))
-
-
-add(best)
-
-while testq:
- top = heapq.heappop(testq)[1]
- m = measure(top)
- step += 1
- if step % 1000 == 0:
- print("iter %d; pending=%d; top=%r/%d" %
- (step, len(testq), top, measure(top)))
- for i in sizes:
- if i >= top[-1]:
- continue
- add(top + [i])
-
-print("SAW SIZES: %r" % sorted(list(sizes)))
-print("BEST: %r" % list(reversed(best)))
-print("BEST MEASURE: %d" % best_measure)
diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal
index b661394..4a41380 100644
--- a/tools/doxygen/Doxyfile.c++.internal
+++ b/tools/doxygen/Doxyfile.c++.internal
@@ -2399,14 +2399,12 @@
src/core/lib/promise/detail/status.h \
src/core/lib/promise/detail/switch.h \
src/core/lib/promise/exec_ctx_wakeup_scheduler.h \
-src/core/lib/promise/for_each.h \
src/core/lib/promise/if.h \
src/core/lib/promise/interceptor_list.h \
+src/core/lib/promise/intra_activity_waiter.h \
src/core/lib/promise/latch.h \
src/core/lib/promise/loop.h \
src/core/lib/promise/map.h \
-src/core/lib/promise/party.cc \
-src/core/lib/promise/party.h \
src/core/lib/promise/pipe.h \
src/core/lib/promise/poll.h \
src/core/lib/promise/promise.h \
@@ -2613,8 +2611,6 @@
src/core/lib/surface/validate_metadata.cc \
src/core/lib/surface/validate_metadata.h \
src/core/lib/surface/version.cc \
-src/core/lib/transport/batch_builder.cc \
-src/core/lib/transport/batch_builder.h \
src/core/lib/transport/bdp_estimator.cc \
src/core/lib/transport/bdp_estimator.h \
src/core/lib/transport/connectivity_state.cc \
diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal
index 8037cfb..22bc220 100644
--- a/tools/doxygen/Doxyfile.core.internal
+++ b/tools/doxygen/Doxyfile.core.internal
@@ -2180,14 +2180,12 @@
src/core/lib/promise/detail/status.h \
src/core/lib/promise/detail/switch.h \
src/core/lib/promise/exec_ctx_wakeup_scheduler.h \
-src/core/lib/promise/for_each.h \
src/core/lib/promise/if.h \
src/core/lib/promise/interceptor_list.h \
+src/core/lib/promise/intra_activity_waiter.h \
src/core/lib/promise/latch.h \
src/core/lib/promise/loop.h \
src/core/lib/promise/map.h \
-src/core/lib/promise/party.cc \
-src/core/lib/promise/party.h \
src/core/lib/promise/pipe.h \
src/core/lib/promise/poll.h \
src/core/lib/promise/promise.h \
@@ -2396,8 +2394,6 @@
src/core/lib/surface/validate_metadata.h \
src/core/lib/surface/version.cc \
src/core/lib/transport/README.md \
-src/core/lib/transport/batch_builder.cc \
-src/core/lib/transport/batch_builder.h \
src/core/lib/transport/bdp_estimator.cc \
src/core/lib/transport/bdp_estimator.h \
src/core/lib/transport/connectivity_state.cc \
diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json
index 01879ac..6068109 100644
--- a/tools/run_tests/generated/tests.json
+++ b/tools/run_tests/generated/tests.json
@@ -5049,6 +5049,30 @@
"ci_platforms": [
"linux",
"mac",
+ "posix",
+ "windows"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "gtest": true,
+ "language": "c++",
+ "name": "observable_test",
+ "platforms": [
+ "linux",
+ "mac",
+ "posix",
+ "windows"
+ ],
+ "uses_polling": false
+ },
+ {
+ "args": [],
+ "benchmark": false,
+ "ci_platforms": [
+ "linux",
+ "mac",
"posix"
],
"cpu_cost": 1.0,