| # Copyright (C) 2020 The Android Open Source Project |
| # |
| # Licensed under the Apache License, Version 2.0 (the "License"); |
| # you may not use this file except in compliance with the License. |
| # You may obtain a copy of the License at |
| # |
# http://www.apache.org/licenses/LICENSE-2.0
| # |
| # Unless required by applicable law or agreed to in writing, software |
| # distributed under the License is distributed on an "AS IS" BASIS, |
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| # See the License for the specific language governing permissions and |
| # limitations under the License. |
| """General-purpose crud for modernmp""" |
| |
| from contextlib import contextmanager |
| from functools import wraps |
| import logging |
| import os |
| import sys |
| import threading |
| import asyncio |
| import weakref |
| import types |
| from typing import Optional |
| from collections.abc import Mapping |
| |
| from signal import ( |
| SIGCHLD, |
| SIG_BLOCK, |
| SIG_DFL, |
| SIG_SETMASK, |
| getsignal, |
| pthread_sigmask, |
| signal, |
| ) |
| |
| import tblib |
| import tblib.pickling_support |
| |
| import futures_then |
| |
# Quiet asyncio's own debug-level chatter; this module logs through `log`.
logging.getLogger("asyncio").setLevel("INFO")
log = logging.getLogger(__name__)

# Make tracebacks picklable so exceptions can cross process boundaries.
tblib.pickling_support.install()
del tblib  # Imported only for the install() side effect above.

IS_WINDOWS = (sys.platform == 'win32')
# Unique sentinel object for "end of stream" / "no value" identity checks.
EOF = object()

# Working directory at startup.  A launcher may stash the original cwd on
# the os module before chdir'ing -- presumably; verify against callers.
STARTUP_CWD = getattr(os, "__modernmp_startup_cwd", None) or os.getcwd()

# Turns out pickling a traceback from a stack overflow itself causes a
# stack overflow, so limit exception stacks to this many frames.
EXCEPTION_STACK_LIMIT = 20

# Guards SafeClosingObject.close() across all instances.
_INTERNAL_LOCK = threading.Lock()

NoneType = type(None)
| |
class DummyContextManager:
    """No-op context manager, usable where a real one is optional"""

    def __enter__(self):
        # Yields None, matching `with DUMMY_CONTEXT_MANAGER:` usage.
        return None

    def __exit__(self, _exc_type, _exc_value, _exc_tb):
        # Returning a falsy value lets any exception propagate.
        return None

# Shared singleton; the manager is stateless, so reuse is safe.
DUMMY_CONTEXT_MANAGER = DummyContextManager()
| |
class SafeClosingObject(object):
    """Inherit from this object to close safely"""
    # Class-level default; flipped to True per-instance on first close().
    __closed = False

    @property
    def closed(self):
        """Return whether this object has at least begun to close."""
        return self.__closed

    def _do_close(self):
        """Subclass hook that performs the actual cleanup.

        Called at most once; racing close() calls return immediately.
        """
        # Override me

    def close(self):
        """Close the object exactly once.

        If another close is in progress, wait for that close to complete
        (the module-wide lock serializes all closes).  If the object is
        already closed, do nothing.
        """
        with _INTERNAL_LOCK:
            was_closed = self.__closed
            self.__closed = True
            if was_closed:
                return
            try:
                self._do_close()
            except:
                # A failing cleanup leaves the process in an unknown
                # state; abort rather than limp along.
                die_due_to_fatal_exception("closing object")
| |
class ClosingContextManager(SafeClosingObject):
    """Context-manager mixin whose exit performs a one-shot close"""

    def __enter__(self):
        return self

    def __exit__(self, _exc_type, _exc_value, _exc_tb):
        # close() is idempotent, inherited from SafeClosingObject.
        self.close()
| |
# N.B. We need these functions because standard exception pickling
# doesn't capture the chain of causation, backtraces, or anything else
# except the exception type and arguments.
| |
def _copy_traceback(real_tb, depth):
    """Copy up to DEPTH frames of traceback REAL_TB via tblib.

    Returns a real traceback object backed by synthetic frames, or None
    if REAL_TB is empty.
    """
    from tblib import Frame, Traceback
    # Copy frames iteratively to avoid stack depth trouble
    # Build the copy front-to-back as a reversed singly-linked list...
    reversed_fake_tb = None
    while real_tb and depth:
        # Bypass Traceback.__init__ -- presumably it would walk the
        # whole tb_next chain itself; confirm against tblib.
        tb = object.__new__(Traceback)
        tb.tb_frame = Frame(real_tb.tb_frame)
        tb.tb_lineno = real_tb.tb_lineno
        tb.tb_next = reversed_fake_tb
        reversed_fake_tb = tb
        real_tb = real_tb.tb_next
        depth -= 1
    # Reverse the list
    fake_tb = None
    while reversed_fake_tb:
        tb = reversed_fake_tb
        reversed_fake_tb = tb.tb_next
        tb.tb_next = fake_tb
        fake_tb = tb
    # `and` short-circuits: an empty copy stays None instead of calling
    # as_traceback() on None.
    return fake_tb and fake_tb.as_traceback()
| |
| def _truncate_traceback(tb, depth_limit): |
| depth = 0 |
| tb_iter = tb |
| while tb_iter: |
| depth += 1 |
| tb_iter = tb_iter.tb_next |
| if depth > depth_limit: |
| tb = _copy_traceback(tb, depth_limit) |
| return tb |
| |
def encode_exception(ex):
    """Encode an exception and its referents for pickling.

    EX is either a BaseException or None.  Standard exception pickling
    drops the traceback and the __context__/__cause__ chain, so capture
    them explicitly as a nested tuple understood by decode_exception.

    Tracebacks are truncated to EXCEPTION_STACK_LIMIT frames (see the
    comment on that constant: pickling a stack-overflow traceback can
    itself overflow the stack).
    """
    # Behavior-identical to the old (BaseException, NoneType) isinstance
    # check, without needing the NoneType alias.
    assert ex is None or isinstance(ex, BaseException)
    if ex is None:
        return None
    # Was a hard-coded 20; use the named constant it was meant to be.
    return (ex,
            _truncate_traceback(ex.__traceback__, EXCEPTION_STACK_LIMIT),
            encode_exception(ex.__context__),
            encode_exception(ex.__cause__))
| |
def decode_exception(saved):
    """Rebuild an exception previously saved with encode_exception.

    SAVED is the (exception, traceback, context, cause) tuple produced
    by encode_exception; the chain is restored recursively.
    """
    exception, traceback, saved_ctx, saved_cause = saved
    exception.__traceback__ = traceback
    if saved_ctx is not None:
        exception.__context__ = decode_exception(saved_ctx)
    if saved_cause is not None:
        exception.__cause__ = decode_exception(saved_cause)
    return exception
| |
def capture_exc_info():
    """Capture the currently-pending exception for later reraising."""
    _ex_type, pending, _tb = sys.exc_info()
    return encode_exception(pending)
| |
def reraise_exc_info(info):
    """Reraise an exception captured with capture_exc_info.

    INFO may also be a bare BaseException, which is raised as-is.
    """
    if not isinstance(info, BaseException):
        info = decode_exception(info)
    raise info
| |
class SignalsBlocked:
    """Context manager that blocks signals and restores the old mask"""

    def __init__(self, *signals):
        # Signals to add to the blocked mask on entry.
        self.__block_signals = set(signals)
        self.__old_sigmask = None

    def restore_signals(self):
        """Restore the signal mask that was in effect on entry.

        Safe to call more than once; later calls are no-ops.
        """
        if IS_WINDOWS:
            return  # No POSIX signal masks on Windows.
        if self.__old_sigmask is not None:
            pthread_sigmask(SIG_SETMASK, self.__old_sigmask)
            self.__old_sigmask = None

    def __enter__(self):
        if not IS_WINDOWS:
            # Not reentrant: a second entry would lose the saved mask.
            assert self.__old_sigmask is None
            self.__old_sigmask = pthread_sigmask(SIG_BLOCK,
                                                 self.__block_signals)
        return self

    def __exit__(self, _exc_type, _exc_value, _exc_tb):
        self.restore_signals()
| |
def safe_close(obj):
    """DTRT to close OBJ.

    None is ignored.  A non-negative int is closed like a file
    descriptor.  Anything else is assumed to have a close() method.
    """
    if obj is None:
        return
    if isinstance(obj, int) and obj >= 0:
        os.close(obj)
    else:
        # Note: a *negative* int falls through here and raises, exactly
        # as the original behavior dictates.
        obj.close()
| |
def once():
    """Decorator that ensures that FUNCTION is called once.

    FUNCTION must have no arguments.

    The function gains "set" and "reset" methods that affect the stored
    value. These methods are not thread safe and are intended to be
    called immediately after fork.
    """
    def _decorator(function):
        # pylint: disable=dangerous-default-value
        # pylint: disable=not-callable
        # Unique sentinel meaning "no cached value yet".
        no_value = object()
        assert not function.__code__.co_argcount
        # state[0] holds the cached value; state[1] is filled in below
        # with _set so the wrapper can publish a freshly computed value.
        state = [no_value, None]
        @wraps(function)
        def _wrapper(state=state,
                     function=function,
                     lock=threading.Lock(),
                     no_value=no_value,
                     value=no_value):
            # Everything is threaded through default arguments so that
            # _set/_reset can swap __code__/__defaults__ on the wrapper
            # to install/remove a fast path.
            if value is not no_value:
                # _set munging below isn't atomic, so we can actually get
                # here. Behave like the fast wrapper in this case.
                return value
            value = state[0]
            if value is no_value:
                # Double-checked locking: recheck under the lock before
                # computing, so FUNCTION runs at most once.
                with lock:
                    value = state[0]
                    if value is no_value:
                        value = function()
                        state[1](value)  # state[1] is _set
            return value
        # Snapshot the slow-path wrapper so _reset can restore it after
        # _set installs the fast-path getter.
        orig_wrapper_code = _wrapper.__code__
        orig_wrapper_defaults = _wrapper.__defaults__
        def _reset():
            # Forget the cached value and reinstall the slow path.
            state[0] = no_value
            _wrapper.__code__ = orig_wrapper_code
            _wrapper.__defaults__ = orig_wrapper_defaults
        _wrapper.reset = _reset
        def _set(value):
            assert value is not no_value
            state[0] = value
            def _fast_getter(value):
                return value
            # Fast path: rebind the wrapper to a trivial getter whose
            # last default argument is the cached value itself.
            _wrapper.__defaults__ = orig_wrapper_defaults[:-1] + (value,)
            _wrapper.__code__ = _fast_getter.__code__
        _wrapper.set = _set
        state[1] = _set
        def _has_value():
            # True once a value has been computed or set.
            return state[0] is not no_value
        _wrapper.has_value = _has_value
        return _wrapper
    return _decorator
| |
def the(type_, value):
    """Assert that VALUE is a TYPE_ and return it (just like Lisp)"""
    assert isinstance(value, type_), \
        "wanted a {}: found a {}".format(type_, type(value))
    return value
| |
def assert_seq_type(container_type, value_type, value):
    """Check that VALUE is a CONTAINER_TYPE of VALUE_TYPE elements.

    Always returns True so it can live inside an assert statement.
    """
    assert isinstance(value, container_type), \
        "{!r} is not a {!r}".format(value, container_type)
    # The offending-element list in the message is only built on failure.
    assert all(isinstance(item, value_type) for item in value), \
        "elements not matching type spec {!r}: {!r}".format(
            value_type,
            [item for item in value if not isinstance(item, value_type)])
    return True
| |
def the_seq(container_type, value_type, value):
    """Type-check VALUE as a sequence (debug builds only) and return it"""
    if __debug__:
        # The check is skipped entirely under -O.
        assert_seq_type(container_type, value_type, value)
    return value
| |
def assert_map_type(container_type, key_type, value_type, value):
    """Check that VALUE is a mapping with the given key/value types.

    Always returns True so it can live inside an assert statement.
    """
    assert isinstance(value, container_type), \
        "{!r} is a {!r}; expecting a {!r}".format(
            value, type(value), container_type)
    assert all(isinstance(key, key_type) for key in value.keys())
    assert all(isinstance(val, value_type) for val in value.values()), \
        "not all of type {}: {!r} {!r}".format(
            value_type,
            list(map(type, value.values())),
            value)
    return True
| |
def the_map(container_type, key_type, value_type, value):
    """Type-check VALUE as a mapping (debug builds only) and return it"""
    if __debug__:
        # The check is skipped entirely under -O.
        assert_map_type(container_type, key_type, value_type, value)
    return value
| |
def ignore(*_args, **_kwargs):
    """Accept and discard any arguments, returning None"""
    return None
| |
class CachedProperty(object):
    """Cheap cached properties using the __dict__ trick.

    On first access the wrapped function runs and its result is stored
    in the instance __dict__ under the function's name.  Because this is
    a non-data descriptor, the instance attribute shadows it afterward,
    so later reads never reach __get__ at all.
    """

    def __init__(self, function):
        self.__doc__ = function.__doc__
        self.__function = function

    def __get__(self, obj, cls):
        if obj is None:
            # Class-level access returns the descriptor itself.
            return self
        fn = self.__function
        result = fn(obj)
        obj.__dict__[fn.__name__] = result
        return result

# Lowercase alias matching the stdlib-style spelling.
cached_property = CachedProperty
| |
class CannotPickle(object):
    """Mixin for classes that cannot be sent over IPC"""

    def __reduce__(self):
        # Refuse any pickling attempt loudly instead of letting the
        # default protocol serialize something meaningless.
        raise RuntimeError("cannot pickle an object of type " + str(type(self)))
| |
class FdHolder(CannotPickle):
    """Holder for file descriptor with automatic cleanup."""
    # Class-level fallbacks: detach()/close() do `del self.__fd`, which
    # re-exposes these class attributes, making the holder read invalid.
    __fd = -1
    __borrow = None

    def __init__(self, fd=-1, *, borrow=None):
        """Don't use FD directly"""
        # BORROW, when truthy, is an object we keep alive for our own
        # lifetime; a borrowed holder never closes the descriptor.
        if borrow:
            self.__borrow = borrow
        assert isinstance(fd, int)
        self.__fd = fd

    @cached_property
    def fdid(self):
        """Unique identifying cookie for the backing file"""
        assert self.valid
        return fdid(self.__fd)

    @staticmethod
    def dup_of(fd):
        """Make an FdHolder owning the same file object as FD.

        FD is either an int or some object that provides fileno().
        """
        if isinstance(fd, FdHolder):
            # Use dup() so any cached fdid carries over.
            return fd.dup()
        if not isinstance(fd, int):
            fd = fd.fileno()
        return FdHolder(os.dup(fd))

    @staticmethod
    def steal(fd):
        """Create an FdHolder that assumes ownership over FD"""
        # Non-int FDs are assumed to support detach() (e.g. sockets or
        # other FdHolders) -- TODO confirm expected input types.
        if not isinstance(fd, int):
            fd = fd.detach()
        return FdHolder(fd)

    @staticmethod
    def borrow(obj):
        """Create a non-owning "view" FdHolder on OBJ

        OBJ is kept alive as long as FdHolder itself is.
        """
        fd = obj
        if not isinstance(fd, int):
            # __init__ detects a borrow by truthiness, so OBJ must be
            # truthy or we'd silently own the descriptor.
            assert obj, "must evaluate to boolean true"
            fd = fd.fileno()
        return FdHolder(fd, borrow=obj)

    @property
    def valid(self):
        """Whether we hold a valid file descriptor"""
        return self.__fd >= 0  # pylint: disable=compare-to-zero

    def dup(self):
        """Create an independent clone of the held file"""
        if not self.valid:
            return FdHolder()
        fdh = FdHolder(os.dup(self.__fd))
        # Propagate an already-computed fdid cache: the dup shares the
        # same open file description, so the cookie is identical.
        my_fdid = self.__dict__.get("fdid")
        if my_fdid:
            fdh.__dict__["fdid"] = my_fdid
        return fdh

    def detach(self):
        """Transfer ownership of held descriptor to caller.

        Return a numeric file descriptor.
        """
        if self.__borrow:
            raise ValueError("cannot detach a borrowed object")
        fd = self.__fd
        # Deleting the instance attribute re-exposes the class-level -1,
        # so the holder reads as invalid from here on.
        del self.__fd
        assert not self.valid
        return fd

    def fileno(self):
        """Return the actual file number held."""
        return self.__fd

    def close(self, close=os.close):
        """Close the underlying file descriptor"""
        # os.close is bound as a default argument so __del__ still works
        # during interpreter shutdown, when module globals may be gone.
        borrow = self.__borrow
        if borrow:
            # Borrowed: drop our references but never close the fd.
            del self.__fd
            del self.__borrow
        elif self.valid:
            close(self.detach())

    def __del__(self):
        # close() tolerates already-closed/detached holders.
        self.close()

    def as_file(self, *args, steal=False, **kwargs):
        """Make a file object out of the owned FD.

        The returned file object owns a duplicate of the underlying file
        descriptor unless STEAL is true, in which case the returned file
        object will close the FD detached from this object.
        """
        assert "closefd" not in kwargs, "file object always owns"
        dup_fd = self if steal else self.dup()
        file_object = open(dup_fd.fileno(), *args, **kwargs)
        # Only detach after open() succeeds: the file object owns it now.
        dup_fd.detach()
        return file_object
| |
def make_tls_setter(name):
    """Build a context-manager method that temporarily sets attribute NAME.

    The returned function is meant to be used as a method: it sets
    NAME on self for the duration of the `with` block, then restores
    (or deletes) the previous value.
    """
    @contextmanager
    def _setter(self, value):
        # EOF doubles as the "attribute was absent" sentinel.
        previous = getattr(self, name, EOF)
        setattr(self, name, value)
        try:
            yield value
        finally:
            if previous is EOF:
                delattr(self, name)
            else:
                setattr(self, name, previous)
    _setter.name = "set_" + name
    return _setter
| |
class MpTls(threading.local):
    """Thread-local storage for modernmp"""

    def __init__(self):
        # threading.local runs __init__ afresh in every thread that
        # touches the instance, giving per-thread defaults.
        super().__init__()
        self.resmanc = None
        self.unpickle_context = None
        self.active_calls = ()

    # Context-manager setters that save/restore each slot.
    set_resmanc = make_tls_setter("resmanc")
    set_resman_server = make_tls_setter("resman_server")
    set_pickle_context = make_tls_setter("pickle_context")
    set_unpickle_context = make_tls_setter("unpickle_context")
    set_active_calls = make_tls_setter("active_calls")

# Process-wide singleton; per-thread state lives inside it.
tls = MpTls()  # pylint: disable=invalid-name
| |
def is_lock_owned_by_current_thread(lock):
    """Return whether the current thread owns LOCK.

    Slow. USE ONLY FOR DEBUGGING.
    """
    # The repr check is disgusting, but there's no other way to access
    # the recursion count.  If we truly held the lock, the non-blocking
    # re-acquire bumps the count to at least 2; a count of 1 means the
    # lock was actually free and our try-acquire grabbed it fresh (a
    # race against _is_owned), so we don't really own it.
    # pylint: disable=protected-access
    reacquired = lock._is_owned() and lock.acquire(blocking=False)
    owned = reacquired and not repr(lock).endswith(" count=1>")
    if reacquired:
        lock.release()
    return owned
| |
def unix_pipe():
    """Like os.pipe(), but return owning FdHolders (read end, write end)"""
    read_fd, write_fd = os.pipe()
    return FdHolder.steal(read_fd), FdHolder.steal(write_fd)
| |
def fdid(fd):
    """Return a value that uniquely identifies an open file description"""
    # (device, inode) pins down the underlying file across dups.
    info = os.fstat(fd)
    return (info.st_dev, info.st_ino)
| |
| # TODO(dancol): get rid of this future and just switch to the asyncio |
| # future everywhere |
class ChainableFuture(futures_then.ThenableFuture):
    """Bugfixed version of ThenableFuture"""

    def __new__(cls, *args, **kwargs):
        # Hack to make the call inside then() return a ChainableFuture and
        # not something else.
        # NOTE(review): detects being constructed from within
        # ThenableFuture.then() by inspecting the caller's code object.
        # pylint: disable=protected-access
        if (not args and not kwargs and cls is not ChainableFuture
                and (sys._getframe(1).f_code is
                     futures_then.ThenableFuture.then.__code__)):
            return ChainableFuture()
        return super().__new__(cls)

    def set_exception_info(self, ex, traceback):
        """Compatibility with Python 2.7.x"""
        assert ex.__traceback__ is traceback
        self.set_exception(ex)

    def __call_once(self, fn, value):
        # Complete the future via FN while the done-callback list is
        # temporarily hidden, so our own _invoke_callbacks below runs
        # them exactly once; racing completions are silently ignored.
        with self._condition:
            if self.done():
                return
            callbacks = self._done_callbacks
            self._done_callbacks = ()
            fn(value)
            self._done_callbacks = callbacks
            self._invoke_callbacks()

    def set_result(self, value):
        # Idempotent completion (unlike the base class).
        self.__call_once(super().set_result, value)

    def set_exception(self, exception):
        # Idempotent completion (unlike the base class).
        self.__call_once(super().set_exception, exception)

    def _invoke_callbacks(self):
        assert self.done()
        # Drain destructively so each callback runs exactly once even if
        # this method is reentered.
        callbacks = self._done_callbacks
        self._done_callbacks = ()
        while callbacks:
            callback = callbacks.pop(0)
            try:
                callback(self)
            except:
                die_due_to_fatal_exception("future callback failed")

    @classmethod
    def of(cls, value):
        """Make a pre-completed future"""
        future = cls()
        future.set_result(value)
        return future

    def __await__(self):
        # Bridge to asyncio by wrapping this concurrent.futures future.
        return iter(asyncio.wrap_future(self))
| |
def maybe_this_thread_event_loop():
    """Return this thread's configured event loop, or None.

    Unlike asyncio.get_event_loop(), peeking at the policy's
    thread-local slot avoids force-creating a loop in the common case.
    """
    # pylint: disable=protected-access
    policy = asyncio.get_event_loop_policy()
    try:
        return policy._local._loop
    except AttributeError:
        # Non-default policy without the private slot: fall back.
        return asyncio.get_event_loop()
| |
def this_thread_event_loop():
    """Return the event loop associated with this thread.

    The loop need not be running, merely configured for the thread.
    """
    loop = maybe_this_thread_event_loop()
    assert loop, "no event loop configured for this thread"
    return loop
| |
# asyncio.get_running_loop() raises when no loop runs; the private
# variant returns None instead, which is what callers here want.
maybe_running_event_loop = asyncio._get_running_loop  # pylint: disable=no-member,protected-access
if __debug__:
    def running_event_loop():
        """Return the running event loop.

        Assert that the event loop is running.
        """
        loop = maybe_running_event_loop()
        assert loop, "no event loop running: did you forget to start it?"
        return loop
else:
    # Under -O the assert would vanish anyway, so skip the wrapper cost.
    running_event_loop = maybe_running_event_loop
| |
@contextmanager
def current_event_loop(loop):
    """Install LOOP as this thread's event loop for the block's duration"""
    # Refuse to stomp on an existing or running loop, or install a dead one.
    assert not maybe_this_thread_event_loop()
    assert not maybe_running_event_loop()
    assert not loop.is_closed()
    asyncio.set_event_loop(loop)
    try:
        yield
    finally:
        # Always leave the thread with no configured loop.
        asyncio.set_event_loop(None)
| |
def _do_die_due_to_fatal_exception(msg, ex):
    # Prefer the pending exception when no explicit one was given.
    if not ex:
        ex = sys.exc_info()[1]
    if not ex:
        ex = AssertionError("no exception pending?!")
    log.critical("dying due to irrecoverable error %s",
                 msg, exc_info=ex, stack_info=True)
    from signal import SIGABRT
    # SIGABRT first (core dump / crash handler), then hard exit; the
    # loop is unreachable belt-and-suspenders.
    os.kill(os.getpid(), SIGABRT)
    os._exit(1)  # pylint: disable=protected-access
    while True:
        pass
| |
def die_due_to_fatal_exception(
    msg: str,
    ex: Optional[BaseException] = None):
    """Kill the program because something went wrong.

    Logs MSG together with EX (or the pending exception) and aborts the
    process; never returns normally.
    """
    return _do_die_due_to_fatal_exception(msg, ex)
| |
@contextmanager
def exceptions_are_fatal(msg):
    """Context manager that aborts the process on any escaping exception"""
    try:
        yield
    except BaseException:  # equivalent to a bare except in Python 3
        die_due_to_fatal_exception(msg)
| |
class ReferenceCountTracker(Mapping):
    """Conveniently keep track of reference counts.

    Behaves as a read-only mapping from key to current count; keys with
    a zero count are absent entirely.
    """

    def __init__(self):
        self.__references = {}

    def addref(self, key):
        """Add a reference to KEY.

        Return the _old_ reference count, allowing calling code to use a
        boolean check to determine the need for initialization.
        """
        previous = self.__references.get(key, 0)
        self.__references[key] = previous + 1
        return previous

    def release(self, key, decrement_by=1):
        """Release DECREMENT_BY references to KEY.

        Return the _new_ reference count, allowing for calling code to
        use a boolean check to establish the need for cleanup.
        """
        remaining = self.__references[key] - decrement_by
        if remaining < 0:  # pylint: disable=compare-to-zero
            raise ValueError("reference count underflow")
        if remaining:
            self.__references[key] = remaining
        else:
            # Zero-count keys are dropped so `in` and len() stay honest.
            del self.__references[key]
        return remaining

    def __contains__(self, key):
        return key in self.__references

    def __getitem__(self, key):
        return self.__references[key]

    def __iter__(self):
        return iter(self.__references)

    def __len__(self):
        return len(self.__references)

    def clear(self):
        """Drop every tracked reference count"""
        self.__references.clear()

    def __delitem__(self, key):
        del self.__references[key]
| |
def weak_bind(method):
    """Wrap a bound method without keeping its object alive.

    The returned function invokes the method only while the object
    still exists, and always returns None either way.  Useful for
    avoiding reference cycles in future callbacks.
    """
    assert isinstance(method, types.MethodType)
    unbound = method.__func__
    target_ref = weakref.ref(method.__self__)
    def _thunk(*args, **kwargs):
        target = target_ref()
        # Truthiness check (not an identity check) matches the original
        # contract: a dead -- or falsy -- target means "skip the call".
        if target:
            unbound(target, *args, **kwargs)
    return _thunk
| |
class MultiDict(dict):
    """Mapping from keys to potentially multiple elements.

    Each key maps to a non-empty set of values.  Mutate only through
    add()/remove(); the normal dict mutation operations are disabled.
    """
    # There's also a multidict in pypi somewhere, but we don't need some
    # silly heavyweight cython-using beast for our simple needs.

    def __init__(self):
        # Dropped the old unused `self.__data` attribute: all storage
        # lives in the dict base class itself.
        super().__init__()

    def add(self, key, value):
        """Add a new value for KEY

        Return whether this is the first value we're adding for KEY.
        """
        if key not in self:
            super().__setitem__(key, {value})
            return True
        self[key].add(value)
        return False

    def remove(self, key, value):
        """Remove a value from KEY.

        Return true if we removed the last value for KEY."""
        item_set = self[key]
        item_set.remove(value)
        if not item_set:
            # Keep the invariant that present keys have non-empty sets.
            del self[key]
            return True
        return False

    @staticmethod
    def __not_supported(*_args, **_kwargs):
        # Accept any arguments so that invoking the disabled dict ops
        # raises NotImplementedError as intended, rather than a
        # confusing TypeError about argument counts.
        raise NotImplementedError("dict ops")

    __setitem__ = __not_supported
    update = __not_supported

    def __repr__(self):
        return "<MultiDict {!r}>".format(super().__repr__())
| |
def rename_code_object(code, new_name):
    """Return a copy of CODE whose internal function name is NEW_NAME"""
    # code.replace() copies the object; the original stays untouched.
    return code.replace(co_name=new_name)
| |
def int_ioctl(fd, code: int):
    """Perform an ioctl on a file descriptor

    The ioctl is expected to write to an int* for output. Return that
    int.
    """
    # fcntl is Unix-only; import lazily so merely loading this module
    # works everywhere.
    from array import array
    from fcntl import ioctl
    fileno = fd.fileno() if hasattr(fd, "fileno") else fd
    out = array("i", [0])
    # Mutable buffer: the kernel writes the result in place.
    ioctl(fileno, code, out)
    return out[0]
| |
class ChildExitWatcher(SafeClosingObject):
    """Watches for process exit using SIGCHLD"""

    @staticmethod
    def check_main_thread():
        """Assert that we're running on the main thread"""
        # Python delivers signal handlers on the main thread, so all
        # watcher state must be touched there.
        assert threading.current_thread() is threading.main_thread()

    def __init__(self):
        # A single-valued dict is fine here: we can't reuse a pid until we
        # wait, and only we wait.
        # Maps pid -> one callback, invoked with the raw wait status.
        self.__watchers = {}

    def __call__(self, _signo, _frame):
        """The actual signal handler"""
        try:
            with self.reaping_blocked():
                self.check_main_thread()
                # Reap every child that has already exited; WNOHANG keeps
                # us from blocking inside the signal handler.
                while True:
                    try:
                        pid, status = os.waitpid(-1, os.WNOHANG)
                    except ChildProcessError:
                        break  # No children left at all.
                    if not pid:
                        break  # Children exist but none have exited yet.
                    watcher = self.__watchers.pop(pid, None)
                    if watcher:
                        watcher(status)
        except:
            die_due_to_fatal_exception("handling SIGCHLD")

    def __do_register(self, pid, callback):
        # pylint: disable=compare-to-zero
        assert pid not in self.__watchers and the(int, pid) > 0
        assert callback and callable(callback)
        self.__watchers[pid] = callback

    @staticmethod
    def __instance():
        # The installed SIGCHLD handler *is* the singleton instance.
        ChildExitWatcher.check_main_thread()
        instance = getsignal(SIGCHLD)
        assert isinstance(instance, ChildExitWatcher)
        return instance

    @classmethod
    def register(cls, pid, callback):
        """Register a callback for child death"""
        # pylint: disable=protected-access
        cls.__instance().__do_register(pid, callback)

    @staticmethod
    @contextmanager
    def reaping_blocked():
        """Context manager preventing child reaping"""
        # Blocking SIGCHLD keeps __call__ from racing with the caller.
        ChildExitWatcher.check_main_thread()
        with SignalsBlocked(SIGCHLD):
            yield

    @classmethod
    def install(cls):
        """Install the child exit hook"""
        # Refuse to clobber a foreign SIGCHLD handler.
        assert getsignal(SIGCHLD) == SIG_DFL
        signal(SIGCHLD, ChildExitWatcher())

    @classmethod
    def uninstall(cls):
        """Remove the child exit hook"""
        assert cls.__instance()
        cls.__instance().__watchers.clear()  # pylint: disable=protected-access
        signal(SIGCHLD, SIG_DFL)