API refactor
All checks were successful
continuous-integration/drone/push Build is passing

This commit is contained in:
2025-10-07 16:25:52 +09:00
parent 76d0d86211
commit 91c7e04474
1171 changed files with 81940 additions and 44117 deletions

View File

@@ -103,26 +103,35 @@ def _get_job_writer(job):
return writer() # is a weakref
def _ensure_integral_fd(fd):
return fd if isinstance(fd, Integral) else fd.fileno()
if hasattr(select, 'poll'):
def _select_imp(readers=None, writers=None, err=None, timeout=0,
poll=select.poll, POLLIN=select.POLLIN,
POLLOUT=select.POLLOUT, POLLERR=select.POLLERR):
poller = poll()
register = poller.register
fd_to_mask = {}
if readers:
[register(fd, POLLIN) for fd in readers]
for fd in map(_ensure_integral_fd, readers):
fd_to_mask[fd] = fd_to_mask.get(fd, 0) | POLLIN
if writers:
[register(fd, POLLOUT) for fd in writers]
for fd in map(_ensure_integral_fd, writers):
fd_to_mask[fd] = fd_to_mask.get(fd, 0) | POLLOUT
if err:
[register(fd, POLLERR) for fd in err]
for fd in map(_ensure_integral_fd, err):
fd_to_mask[fd] = fd_to_mask.get(fd, 0) | POLLERR
for fd, event_mask in fd_to_mask.items():
register(fd, event_mask)
R, W = set(), set()
timeout = 0 if timeout and timeout < 0 else round(timeout * 1e3)
events = poller.poll(timeout)
for fd, event in events:
if not isinstance(fd, Integral):
fd = fd.fileno()
if event & POLLIN:
R.add(fd)
if event & POLLOUT:
@@ -194,7 +203,7 @@ def iterate_file_descriptors_safely(fds_iter, source_data,
or possibly other reasons, so safely manage our lists of FDs.
:param fds_iter: the file descriptors to iterate and apply hub_method
:param source_data: data source to remove FD if it renders OSError
:param hub_method: the method to call with each fd and kwargs
:*args to pass through to the hub_method;
with a special syntax string '*fd*' represents a substitution
for the current fd object in the iteration (for some callers).
@@ -772,7 +781,7 @@ class AsynPool(_pool.Pool):
None, WRITE | ERR, consolidate=True)
else:
iterate_file_descriptors_safely(
inactive, all_inqueues, hub_remove)
inactive, all_inqueues, hub.remove_writer)
self.on_poll_start = on_poll_start
def on_inqueue_close(fd, proc):
@@ -818,7 +827,7 @@ class AsynPool(_pool.Pool):
# worker is already busy with another task
continue
if ready_fd not in all_inqueues:
hub_remove(ready_fd)
hub.remove_writer(ready_fd)
continue
try:
job = pop_message()
@@ -829,7 +838,7 @@ class AsynPool(_pool.Pool):
# this may create a spinloop where the event loop
# always wakes up.
for inqfd in diff(active_writes):
hub_remove(inqfd)
hub.remove_writer(inqfd)
break
else:
@@ -927,7 +936,7 @@ class AsynPool(_pool.Pool):
else:
errors = 0
finally:
hub_remove(fd)
hub.remove_writer(fd)
write_stats[proc.index] += 1
# message written, so this fd is now available
active_writes.discard(fd)

View File

@@ -1,4 +1,6 @@
"""Gevent execution pool."""
import functools
import types
from time import monotonic
from kombu.asynchronous import timer as _timer
@@ -16,15 +18,22 @@ __all__ = ('TaskPool',)
# We cache globals and attribute lookups, so disable this warning.
def apply_target(target, args=(), kwargs=None, callback=None,
                 accept_callback=None, getpid=None, **_):
    """Apply *target* via :func:`base.apply_target`, resolving the pid lazily.

    *getpid* is a zero-argument callable that yields the identifier of the
    current green "process"; it is invoked at call time rather than being
    passed as a fixed pid.
    """
    return base.apply_target(target, args, kwargs or {}, callback,
                             accept_callback, pid=getpid(), **_)
def apply_timeout(target, args=(), kwargs=None, callback=None,
                  accept_callback=None, getpid=None, timeout=None,
                  timeout_callback=None, Timeout=Timeout,
                  apply_target=base.apply_target, **rest):
    """Apply *target* bounded by a gevent ``Timeout``.

    Runs *target* through :func:`base.apply_target` inside a
    ``Timeout(timeout)`` context; if the deadline expires,
    *timeout_callback* is invoked with ``(False, timeout)`` instead.

    :param getpid: zero-argument callable returning the id of the current
        green "process" (called lazily, at execution time).
    :param timeout: seconds before the job is aborted.
    :param timeout_callback: called as ``timeout_callback(False, timeout)``
        when the timeout fires.
    """
    # NOTE: the stripped diff contained both the old (``pid=None``) and new
    # (``getpid=None``) signatures; this is the post-refactor version, which
    # matches the ``apply_target`` wrapper above.
    kwargs = {} if not kwargs else kwargs
    try:
        with Timeout(timeout):
            return apply_target(target, args, kwargs, callback,
                                accept_callback, getpid(),
                                propagate=(Timeout,), **rest)
    except Timeout:
        return timeout_callback(False, timeout)
@@ -82,18 +91,22 @@ class TaskPool(base.BasePool):
is_green = True
task_join_will_block = False
_pool = None
_pool_map = None
_quick_put = None
def __init__(self, *args, **kwargs):
    """Set up gevent primitives before the base pool initialises.

    Binds ``getcurrent``/``spawn_raw`` from gevent and defines
    ``self.getpid`` as the id of the currently running greenlet, which
    serves as the "pid" of a green job for ``terminate_job`` lookups.
    """
    # NOTE: the stripped diff showed both the old and new import lines;
    # the post-refactor import (with ``getcurrent``) is the correct one.
    from gevent import getcurrent, spawn_raw
    from gevent.pool import Pool
    self.Pool = Pool
    self.getcurrent = getcurrent
    # A greenlet has no OS pid; use its object id as a stable job handle.
    self.getpid = lambda: id(getcurrent())
    self.spawn_n = spawn_raw
    self.timeout = kwargs.get('timeout')
    super().__init__(*args, **kwargs)
def on_start(self):
    """Create the gevent pool and the pid -> greenlet tracking map."""
    pool = self.Pool(self.limit)
    self._pool = pool
    self._pool_map = {}
    # spawn directly on the pool; jobs are greenlets.
    self._quick_put = pool.spawn
def on_stop(self):
@@ -102,12 +115,15 @@ class TaskPool(base.BasePool):
def on_apply(self, target, args=None, kwargs=None, callback=None,
             accept_callback=None, timeout=None,
             timeout_callback=None, apply_target=apply_target, **_):
    """Spawn *target* as a greenlet and register it for termination.

    Wraps *target* so a gevent kill is reported as a failed job rather
    than propagating ``GreenletExit``, tracks the greenlet by ``id()`` so
    ``terminate_job`` can find it, and attaches a no-op ``terminate``
    method for API compatibility with process pools.

    :returns: the spawned greenlet.
    """
    # NOTE: the stripped diff interleaved the pre- and post-refactor
    # bodies; this is the post-refactor version (module-level
    # ``apply_target`` default, ``self.getpid`` passed through).
    timeout = self.timeout if timeout is None else timeout
    target = self._make_killable_target(target)
    greenlet = self._quick_put(apply_timeout if timeout else apply_target,
                               target, args, kwargs, callback, accept_callback,
                               self.getpid, timeout=timeout,
                               timeout_callback=timeout_callback)
    self._add_to_pool_map(id(greenlet), greenlet)
    # Process pools expose job.terminate(); emulate it on the greenlet.
    greenlet.terminate = types.MethodType(_terminate, greenlet)
    return greenlet
def grow(self, n=1):
self._pool._semaphore.counter += n
@@ -117,6 +133,39 @@ class TaskPool(base.BasePool):
self._pool._semaphore.counter -= n
self._pool.size -= n
def terminate_job(self, pid, signal=None):
    """Kill the greenlet tracked under *pid*, if it is still running.

    *signal* is accepted for interface parity with process pools but is
    not used — greenlets are killed, not signalled.
    """
    import gevent
    greenlet = self._pool_map.get(pid)
    if greenlet is not None:
        gevent.kill(greenlet)
@property
def num_processes(self):
    # Number of entries in the underlying gevent pool (the green
    # equivalent of worker-process count).
    return len(self._pool)
@staticmethod
def _make_killable_target(target):
    """Wrap *target* so a gevent kill yields a failed-job result.

    ``GreenletExit`` raised by ``gevent.kill`` is converted into the
    ``(False, None, None)`` result tuple instead of propagating.
    """
    def _killable(*args, **kwargs):
        from greenlet import GreenletExit
        try:
            return target(*args, **kwargs)
        except GreenletExit:
            # (propagate, result, runtime) — report the job as failed.
            return (False, None, None)
    return _killable
def _add_to_pool_map(self, pid, greenlet):
    """Track *greenlet* under *pid*; auto-unregister when it finishes."""
    pool_map = self._pool_map
    pool_map[pid] = greenlet
    cleanup = functools.partial(
        self._cleanup_after_job_finish, pid=pid, pool_map=pool_map)
    greenlet.link(cleanup)
@staticmethod
def _cleanup_after_job_finish(greenlet, pool_map, pid):
    """Link callback: drop the finished greenlet's entry from *pool_map*."""
    pool_map.pop(pid)
def _terminate(self, signal):
# Done in `TaskPool.terminate_job`
pass