main commit
188  venv/lib/python3.12/site-packages/eventlet/hubs/__init__.py  Normal file
@@ -0,0 +1,188 @@
import importlib
import inspect
import os
import warnings

from eventlet import patcher
from eventlet.support import greenlets as greenlet


__all__ = ["use_hub", "get_hub", "get_default_hub", "trampoline"]

threading = patcher.original('threading')
_threadlocal = threading.local()


# order is important, get_default_hub returns first available from here
builtin_hub_names = ('epolls', 'kqueue', 'poll', 'selects')
builtin_hub_modules = tuple(importlib.import_module('eventlet.hubs.' + name) for name in builtin_hub_names)


class HubError(Exception):
    pass


def get_default_hub():
    """Select the default hub implementation based on what multiplexing
    libraries are installed. The order that the hubs are tried is:

    * epoll
    * kqueue
    * poll
    * select

    .. include:: ../../doc/source/common.txt
    .. note :: |internal|
    """
    for mod in builtin_hub_modules:
        if mod.is_available():
            return mod

    raise HubError('no built-in hubs are available: {}'.format(builtin_hub_modules))


def use_hub(mod=None):
    """Use the module *mod*, containing a class called Hub, as the
    event hub. Usually not required; the default hub is usually fine.

    `mod` can be an actual hub class, a module, a string, or None.

    If `mod` is a class, use it directly.
    If `mod` is a module, use `module.Hub` class
    If `mod` is a string and contains either '.' or ':'
    then `use_hub` uses 'package.subpackage.module:Class' convention,
    otherwise imports `eventlet.hubs.mod`.
    If `mod` is None, `use_hub` uses the default hub.

    Only call use_hub during application initialization,
    because it resets the hub's state and any existing
    timers or listeners will never be resumed.

    These two threadlocal attributes are not part of Eventlet public API:
    - `threadlocal.Hub` (capital H) is hub constructor, used when no hub is currently active
    - `threadlocal.hub` (lowercase h) is active hub instance
    """
    if mod is None:
        mod = os.environ.get('EVENTLET_HUB', None)
    if mod is None:
        mod = get_default_hub()
    if hasattr(_threadlocal, 'hub'):
        del _threadlocal.hub

    classname = ''
    if isinstance(mod, str):
        if mod.strip() == "":
            raise RuntimeError("Need to specify a hub")
        if '.' in mod or ':' in mod:
            modulename, _, classname = mod.strip().partition(':')
        else:
            modulename = 'eventlet.hubs.' + mod
        mod = importlib.import_module(modulename)

    if hasattr(mod, 'is_available'):
        if not mod.is_available():
            raise Exception('selected hub is not available on this system mod={}'.format(mod))
    else:
        msg = '''Please provide `is_available()` function in your custom Eventlet hub {mod}.
It must return bool: whether hub supports current platform. See eventlet/hubs/{{epoll,kqueue}} for example.
'''.format(mod=mod)
        warnings.warn(msg, DeprecationWarning, stacklevel=3)

    hubclass = mod
    if not inspect.isclass(mod):
        hubclass = getattr(mod, classname or 'Hub')

    _threadlocal.Hub = hubclass


def get_hub():
    """Get the current event hub singleton object.

    .. note :: |internal|
    """
    try:
        hub = _threadlocal.hub
    except AttributeError:
        try:
            _threadlocal.Hub
        except AttributeError:
            use_hub()
        hub = _threadlocal.hub = _threadlocal.Hub()
    return hub


# Lame middle file import because complex dependencies in import graph
from eventlet import timeout


def trampoline(fd, read=None, write=None, timeout=None,
               timeout_exc=timeout.Timeout,
               mark_as_closed=None):
    """Suspend the current coroutine until the given socket object or file
    descriptor is ready to *read*, ready to *write*, or the specified
    *timeout* elapses, depending on arguments specified.

    To wait for *fd* to be ready to read, pass *read* ``=True``; ready to
    write, pass *write* ``=True``. To specify a timeout, pass the *timeout*
    argument in seconds.

    If the specified *timeout* elapses before the socket is ready to read or
    write, *timeout_exc* will be raised instead of ``trampoline()``
    returning normally.

    .. note :: |internal|
    """
    t = None
    hub = get_hub()
    current = greenlet.getcurrent()
    if hub.greenlet is current:
        raise RuntimeError('do not call blocking functions from the mainloop')
    if (read and write):
        raise RuntimeError('not allowed to trampoline for reading and writing')
    try:
        fileno = fd.fileno()
    except AttributeError:
        fileno = fd
    if timeout is not None:
        def _timeout(exc):
            # This is only useful to insert debugging
            current.throw(exc)
        t = hub.schedule_call_global(timeout, _timeout, timeout_exc)
    try:
        if read:
            listener = hub.add(hub.READ, fileno, current.switch, current.throw, mark_as_closed)
        elif write:
            listener = hub.add(hub.WRITE, fileno, current.switch, current.throw, mark_as_closed)
        try:
            return hub.switch()
        finally:
            hub.remove(listener)
    finally:
        if t is not None:
            t.cancel()


def notify_close(fd):
    """
    A particular file descriptor has been explicitly closed. Register for any
    waiting listeners to be notified on the next run loop.
    """
    hub = get_hub()
    hub.notify_close(fd)


def notify_opened(fd):
    """
    Some file descriptors may be closed 'silently' - that is, by the garbage
    collector, by an external library, etc. When the OS returns a file descriptor
    from an open call (or something similar), this may be the only indication we
    have that the FD has been closed and then recycled.
    We let the hub know that the old file descriptor is dead; any stuck listeners
    will be disabled and notified in turn.
    """
    hub = get_hub()
    hub.mark_as_reopened(fd)


class IOClosed(IOError):
    pass
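Note: the use_hub()/get_hub() docstrings above describe how a hub is chosen (class, module, 'module:Class' string, short name, or the EVENTLET_HUB environment variable). A minimal sketch of explicit selection at application start-up, assuming eventlet is importable and this runs before any other eventlet use, might look like:

    # Illustrative sketch only, not part of the commit.
    import eventlet.hubs

    eventlet.hubs.use_hub('selects')            # short name -> eventlet.hubs.selects
    # eventlet.hubs.use_hub('mypkg.myhub:Hub')  # hypothetical 'module:Class' path
    hub = eventlet.hubs.get_hub()               # lazily instantiates the chosen hub class
    print(type(hub))                            # e.g. <class 'eventlet.hubs.selects.Hub'>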
Binary file not shown. (repeated for 9 binary files)
174  venv/lib/python3.12/site-packages/eventlet/hubs/asyncio.py  Normal file
@@ -0,0 +1,174 @@
"""
Asyncio-based hub, originally implemented by Miguel Grinberg.
"""

# The various modules involved in asyncio need to call the original, unpatched
# standard library APIs to work: socket, select, threading, and so on. We
# therefore don't import them on the module level, since that would involve
# their imports getting patched, and instead delay importing them as much as
# possible. Then, we do a little song and dance in Hub.__init__ below so that
# when they're imported they import the original modules (select, socket, etc)
# rather than the patched ones.

import os
import sys

from eventlet.hubs import hub
from eventlet.patcher import _unmonkey_patch_asyncio_all


def is_available():
    """
    Indicate whether this hub is available, since some hubs are
    platform-specific.

    Python always has asyncio, so this is always ``True``.
    """
    return True


class Hub(hub.BaseHub):
    """An Eventlet hub implementation on top of an asyncio event loop."""

    def __init__(self):
        super().__init__()

        # Pre-emptively make sure we're using the right modules:
        _unmonkey_patch_asyncio_all()

        # The presumption is that eventlet is driving the event loop, so we
        # want a new one we control.
        import asyncio

        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)
        self.sleep_event = asyncio.Event()

        import asyncio.events
        if hasattr(asyncio.events, "on_fork"):
            # Allow post-fork() child to continue using the same event loop.
            # This is a terrible idea.
            asyncio.events.on_fork.__code__ = (lambda: None).__code__
        else:
            # On Python 3.9-3.11, there's a thread local we need to reset.
            # Also a terrible idea.
            def re_register_loop(loop=self.loop):
                asyncio.events._set_running_loop(loop)

            os.register_at_fork(after_in_child=re_register_loop)

    def add_timer(self, timer):
        """
        Register a ``Timer``.

        Typically not called directly by users.
        """
        super().add_timer(timer)
        self.sleep_event.set()

    def _file_cb(self, cb, fileno):
        """
        Callback called by ``asyncio`` when a file descriptor has an event.
        """
        try:
            cb(fileno)
        except self.SYSTEM_EXCEPTIONS:
            raise
        except:
            self.squelch_exception(fileno, sys.exc_info())
        self.sleep_event.set()

    def add(self, evtype, fileno, cb, tb, mark_as_closed):
        """
        Add a file descriptor of given event type to the ``Hub``. See the
        superclass for details.

        Typically not called directly by users.
        """
        try:
            os.fstat(fileno)
        except OSError:
            raise ValueError("Invalid file descriptor")
        already_listening = self.listeners[evtype].get(fileno) is not None
        listener = super().add(evtype, fileno, cb, tb, mark_as_closed)
        if not already_listening:
            if evtype == hub.READ:
                self.loop.add_reader(fileno, self._file_cb, cb, fileno)
            else:
                self.loop.add_writer(fileno, self._file_cb, cb, fileno)
        return listener

    def remove(self, listener):
        """
        Remove a listener from the ``Hub``. See the superclass for details.

        Typically not called directly by users.
        """
        super().remove(listener)
        evtype = listener.evtype
        fileno = listener.fileno
        if not self.listeners[evtype].get(fileno):
            if evtype == hub.READ:
                self.loop.remove_reader(fileno)
            else:
                self.loop.remove_writer(fileno)

    def remove_descriptor(self, fileno):
        """
        Remove a file descriptor from the ``asyncio`` loop.

        Typically not called directly by users.
        """
        have_read = self.listeners[hub.READ].get(fileno)
        have_write = self.listeners[hub.WRITE].get(fileno)
        super().remove_descriptor(fileno)
        if have_read:
            self.loop.remove_reader(fileno)
        if have_write:
            self.loop.remove_writer(fileno)

    def run(self, *a, **kw):
        """
        Start the ``Hub`` running. See the superclass for details.
        """
        import asyncio

        async def async_run():
            if self.running:
                raise RuntimeError("Already running!")
            try:
                self.running = True
                self.stopping = False
                while not self.stopping:
                    while self.closed:
                        # We ditch all of these first.
                        self.close_one()
                    self.prepare_timers()
                    if self.debug_blocking:
                        self.block_detect_pre()
                    self.fire_timers(self.clock())
                    if self.debug_blocking:
                        self.block_detect_post()
                    self.prepare_timers()
                    wakeup_when = self.sleep_until()
                    if wakeup_when is None:
                        sleep_time = self.default_sleep()
                    else:
                        sleep_time = wakeup_when - self.clock()
                    if sleep_time > 0:
                        try:
                            await asyncio.wait_for(self.sleep_event.wait(), sleep_time)
                        except asyncio.TimeoutError:
                            pass
                        self.sleep_event.clear()
                    else:
                        await asyncio.sleep(0)
                else:
                    self.timers_canceled = 0
                    del self.timers[:]
                    del self.next_timers[:]
            finally:
                self.running = False
                self.stopping = False

        self.loop.run_until_complete(async_run())
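Note: since is_available() above always returns True, the asyncio hub can be selected on any platform; a minimal opt-in sketch, assuming it runs once during start-up before any other eventlet machinery has created a hub:

    # Illustrative sketch only, not part of the commit.
    import eventlet.hubs

    eventlet.hubs.use_hub('asyncio')       # or set EVENTLET_HUB=asyncio in the environment
    print(type(eventlet.hubs.get_hub()))   # <class 'eventlet.hubs.asyncio.Hub'>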
31  venv/lib/python3.12/site-packages/eventlet/hubs/epolls.py  Normal file
@@ -0,0 +1,31 @@
import errno
from eventlet import patcher, support
from eventlet.hubs import hub, poll
select = patcher.original('select')


def is_available():
    return hasattr(select, 'epoll')


# NOTE: we rely on the fact that the epoll flag constants
# are identical in value to the poll constants
class Hub(poll.Hub):
    def __init__(self, clock=None):
        super().__init__(clock=clock)
        self.poll = select.epoll()

    def add(self, evtype, fileno, cb, tb, mac):
        oldlisteners = bool(self.listeners[self.READ].get(fileno) or
                            self.listeners[self.WRITE].get(fileno))
        # not super() to avoid double register()
        listener = hub.BaseHub.add(self, evtype, fileno, cb, tb, mac)
        try:
            self.register(fileno, new=not oldlisteners)
        except OSError as ex:  # ignore EEXIST, #80
            if support.get_errno(ex) != errno.EEXIST:
                raise
        return listener

    def do_poll(self, seconds):
        return self.poll.poll(seconds)
495  venv/lib/python3.12/site-packages/eventlet/hubs/hub.py  Normal file
@@ -0,0 +1,495 @@
import errno
import heapq
import math
import signal
import sys
import traceback

arm_alarm = None
if hasattr(signal, 'setitimer'):
    def alarm_itimer(seconds):
        signal.setitimer(signal.ITIMER_REAL, seconds)
    arm_alarm = alarm_itimer
else:
    try:
        import itimer
        arm_alarm = itimer.alarm
    except ImportError:
        def alarm_signal(seconds):
            signal.alarm(math.ceil(seconds))
        arm_alarm = alarm_signal

import eventlet.hubs
from eventlet.hubs import timer
from eventlet.support import greenlets as greenlet
try:
    from monotonic import monotonic
except ImportError:
    from time import monotonic

g_prevent_multiple_readers = True

READ = "read"
WRITE = "write"


def closed_callback(fileno):
    """ Used to de-fang a callback that may be triggered by a loop in BaseHub.wait
    """
    # No-op.
    pass


class FdListener:

    def __init__(self, evtype, fileno, cb, tb, mark_as_closed):
        """ The following are required:
        cb - the standard callback, which will switch into the
            listening greenlet to indicate that the event waited upon
            is ready
        tb - a 'throwback'. This is typically greenlet.throw, used
            to raise a signal into the target greenlet indicating that
            an event was obsoleted by its underlying filehandle being
            repurposed.
        mark_as_closed - if any listener is obsoleted, this is called
            (in the context of some other client greenlet) to alert
            underlying filehandle-wrapping objects that they've been
            closed.
        """
        assert (evtype is READ or evtype is WRITE)
        self.evtype = evtype
        self.fileno = fileno
        self.cb = cb
        self.tb = tb
        self.mark_as_closed = mark_as_closed
        self.spent = False
        self.greenlet = greenlet.getcurrent()

    def __repr__(self):
        return "%s(%r, %r, %r, %r)" % (type(self).__name__, self.evtype, self.fileno,
                                       self.cb, self.tb)
    __str__ = __repr__

    def defang(self):
        self.cb = closed_callback
        if self.mark_as_closed is not None:
            self.mark_as_closed()
        self.spent = True


noop = FdListener(READ, 0, lambda x: None, lambda x: None, None)


# in debug mode, track the call site that created the listener


class DebugListener(FdListener):

    def __init__(self, evtype, fileno, cb, tb, mark_as_closed):
        self.where_called = traceback.format_stack()
        self.greenlet = greenlet.getcurrent()
        super().__init__(evtype, fileno, cb, tb, mark_as_closed)

    def __repr__(self):
        return "DebugListener(%r, %r, %r, %r, %r, %r)\n%sEndDebugFdListener" % (
            self.evtype,
            self.fileno,
            self.cb,
            self.tb,
            self.mark_as_closed,
            self.greenlet,
            ''.join(self.where_called))
    __str__ = __repr__


def alarm_handler(signum, frame):
    import inspect
    raise RuntimeError("Blocking detector ALARMED at" + str(inspect.getframeinfo(frame)))


class BaseHub:
    """ Base hub class for easing the implementation of subclasses that are
    specific to a particular underlying event architecture. """

    SYSTEM_EXCEPTIONS = (KeyboardInterrupt, SystemExit)

    READ = READ
    WRITE = WRITE

    def __init__(self, clock=None):
        self.listeners = {READ: {}, WRITE: {}}
        self.secondaries = {READ: {}, WRITE: {}}
        self.closed = []

        if clock is None:
            clock = monotonic
        self.clock = clock

        self.greenlet = greenlet.greenlet(self.run)
        self.stopping = False
        self.running = False
        self.timers = []
        self.next_timers = []
        self.lclass = FdListener
        self.timers_canceled = 0
        self.debug_exceptions = True
        self.debug_blocking = False
        self.debug_blocking_resolution = 1

    def block_detect_pre(self):
        # shortest alarm we can possibly raise is one second
        tmp = signal.signal(signal.SIGALRM, alarm_handler)
        if tmp != alarm_handler:
            self._old_signal_handler = tmp

        arm_alarm(self.debug_blocking_resolution)

    def block_detect_post(self):
        if (hasattr(self, "_old_signal_handler") and
                self._old_signal_handler):
            signal.signal(signal.SIGALRM, self._old_signal_handler)
        signal.alarm(0)

    def add(self, evtype, fileno, cb, tb, mark_as_closed):
        """ Signals an intent to or write a particular file descriptor.

        The *evtype* argument is either the constant READ or WRITE.

        The *fileno* argument is the file number of the file of interest.

        The *cb* argument is the callback which will be called when the file
        is ready for reading/writing.

        The *tb* argument is the throwback used to signal (into the greenlet)
        that the file was closed.

        The *mark_as_closed* is used in the context of the event hub to
        prepare a Python object as being closed, pre-empting further
        close operations from accidentally shutting down the wrong OS thread.
        """
        listener = self.lclass(evtype, fileno, cb, tb, mark_as_closed)
        bucket = self.listeners[evtype]
        if fileno in bucket:
            if g_prevent_multiple_readers:
                raise RuntimeError(
                    "Second simultaneous %s on fileno %s "
                    "detected. Unless you really know what you're doing, "
                    "make sure that only one greenthread can %s any "
                    "particular socket. Consider using a pools.Pool. "
                    "If you do know what you're doing and want to disable "
                    "this error, call "
                    "eventlet.debug.hub_prevent_multiple_readers(False) - MY THREAD=%s; "
                    "THAT THREAD=%s" % (
                        evtype, fileno, evtype, cb, bucket[fileno]))
            # store off the second listener in another structure
            self.secondaries[evtype].setdefault(fileno, []).append(listener)
        else:
            bucket[fileno] = listener
        return listener

    def _obsolete(self, fileno):
        """ We've received an indication that 'fileno' has been obsoleted.
            Any current listeners must be defanged, and notifications to
            their greenlets queued up to send.
        """
        found = False
        for evtype, bucket in self.secondaries.items():
            if fileno in bucket:
                for listener in bucket[fileno]:
                    found = True
                    self.closed.append(listener)
                    listener.defang()
                del bucket[fileno]

        # For the primary listeners, we actually need to call remove,
        # which may modify the underlying OS polling objects.
        for evtype, bucket in self.listeners.items():
            if fileno in bucket:
                listener = bucket[fileno]
                found = True
                self.closed.append(listener)
                self.remove(listener)
                listener.defang()

        return found

    def notify_close(self, fileno):
        """ We might want to do something when a fileno is closed.
            However, currently it suffices to obsolete listeners only
            when we detect an old fileno being recycled, on open.
        """
        pass

    def remove(self, listener):
        if listener.spent:
            # trampoline may trigger this in its finally section.
            return

        fileno = listener.fileno
        evtype = listener.evtype
        if listener is self.listeners[evtype][fileno]:
            del self.listeners[evtype][fileno]
            # migrate a secondary listener to be the primary listener
            if fileno in self.secondaries[evtype]:
                sec = self.secondaries[evtype][fileno]
                if sec:
                    self.listeners[evtype][fileno] = sec.pop(0)
                if not sec:
                    del self.secondaries[evtype][fileno]
        else:
            self.secondaries[evtype][fileno].remove(listener)
            if not self.secondaries[evtype][fileno]:
                del self.secondaries[evtype][fileno]

    def mark_as_reopened(self, fileno):
        """ If a file descriptor is returned by the OS as the result of some
            open call (or equivalent), that signals that it might be being
            recycled.

            Catch the case where the fd was previously in use.
        """
        self._obsolete(fileno)

    def remove_descriptor(self, fileno):
        """ Completely remove all listeners for this fileno. For internal use
        only."""
        # gather any listeners we have
        listeners = []
        listeners.append(self.listeners[READ].get(fileno, noop))
        listeners.append(self.listeners[WRITE].get(fileno, noop))
        listeners.extend(self.secondaries[READ].get(fileno, ()))
        listeners.extend(self.secondaries[WRITE].get(fileno, ()))
        for listener in listeners:
            try:
                # listener.cb may want to remove(listener)
                listener.cb(fileno)
            except Exception:
                self.squelch_generic_exception(sys.exc_info())
        # NOW this fileno is now dead to all
        self.listeners[READ].pop(fileno, None)
        self.listeners[WRITE].pop(fileno, None)
        self.secondaries[READ].pop(fileno, None)
        self.secondaries[WRITE].pop(fileno, None)

    def close_one(self):
        """ Triggered from the main run loop. If a listener's underlying FD was
            closed somehow, throw an exception back to the trampoline, which should
            be able to manage it appropriately.
        """
        listener = self.closed.pop()
        if not listener.greenlet.dead:
            # There's no point signalling a greenlet that's already dead.
            listener.tb(eventlet.hubs.IOClosed(errno.ENOTCONN, "Operation on closed file"))

    def ensure_greenlet(self):
        if self.greenlet.dead:
            # create new greenlet sharing same parent as original
            new = greenlet.greenlet(self.run, self.greenlet.parent)
            # need to assign as parent of old greenlet
            # for those greenlets that are currently
            # children of the dead hub and may subsequently
            # exit without further switching to hub.
            self.greenlet.parent = new
            self.greenlet = new

    def switch(self):
        cur = greenlet.getcurrent()
        assert cur is not self.greenlet, 'Cannot switch to MAINLOOP from MAINLOOP'
        switch_out = getattr(cur, 'switch_out', None)
        if switch_out is not None:
            try:
                switch_out()
            except:
                self.squelch_generic_exception(sys.exc_info())
        self.ensure_greenlet()
        try:
            if self.greenlet.parent is not cur:
                cur.parent = self.greenlet
        except ValueError:
            pass  # gets raised if there is a greenlet parent cycle
        return self.greenlet.switch()

    def squelch_exception(self, fileno, exc_info):
        traceback.print_exception(*exc_info)
        sys.stderr.write("Removing descriptor: %r\n" % (fileno,))
        sys.stderr.flush()
        try:
            self.remove_descriptor(fileno)
        except Exception as e:
            sys.stderr.write("Exception while removing descriptor! %r\n" % (e,))
            sys.stderr.flush()

    def wait(self, seconds=None):
        raise NotImplementedError("Implement this in a subclass")

    def default_sleep(self):
        return 60.0

    def sleep_until(self):
        t = self.timers
        if not t:
            return None
        return t[0][0]

    def run(self, *a, **kw):
        """Run the runloop until abort is called.
        """
        # accept and discard variable arguments because they will be
        # supplied if other greenlets have run and exited before the
        # hub's greenlet gets a chance to run
        if self.running:
            raise RuntimeError("Already running!")
        try:
            self.running = True
            self.stopping = False
            while not self.stopping:
                while self.closed:
                    # We ditch all of these first.
                    self.close_one()
                self.prepare_timers()
                if self.debug_blocking:
                    self.block_detect_pre()
                self.fire_timers(self.clock())
                if self.debug_blocking:
                    self.block_detect_post()
                self.prepare_timers()
                wakeup_when = self.sleep_until()
                if wakeup_when is None:
                    sleep_time = self.default_sleep()
                else:
                    sleep_time = wakeup_when - self.clock()
                if sleep_time > 0:
                    self.wait(sleep_time)
                else:
                    self.wait(0)
            else:
                self.timers_canceled = 0
                del self.timers[:]
                del self.next_timers[:]
        finally:
            self.running = False
            self.stopping = False

    def abort(self, wait=False):
        """Stop the runloop. If run is executing, it will exit after
        completing the next runloop iteration.

        Set *wait* to True to cause abort to switch to the hub immediately and
        wait until it's finished processing. Waiting for the hub will only
        work from the main greenthread; all other greenthreads will become
        unreachable.
        """
        if self.running:
            self.stopping = True
        if wait:
            assert self.greenlet is not greenlet.getcurrent(
            ), "Can't abort with wait from inside the hub's greenlet."
            # schedule an immediate timer just so the hub doesn't sleep
            self.schedule_call_global(0, lambda: None)
            # switch to it; when done the hub will switch back to its parent,
            # the main greenlet
            self.switch()

    def squelch_generic_exception(self, exc_info):
        if self.debug_exceptions:
            traceback.print_exception(*exc_info)
            sys.stderr.flush()

    def squelch_timer_exception(self, timer, exc_info):
        if self.debug_exceptions:
            traceback.print_exception(*exc_info)
            sys.stderr.flush()

    def add_timer(self, timer):
        scheduled_time = self.clock() + timer.seconds
        self.next_timers.append((scheduled_time, timer))
        return scheduled_time

    def timer_canceled(self, timer):
        self.timers_canceled += 1
        len_timers = len(self.timers) + len(self.next_timers)
        if len_timers > 1000 and len_timers / 2 <= self.timers_canceled:
            self.timers_canceled = 0
            self.timers = [t for t in self.timers if not t[1].called]
            self.next_timers = [t for t in self.next_timers if not t[1].called]
            heapq.heapify(self.timers)

    def prepare_timers(self):
        heappush = heapq.heappush
        t = self.timers
        for item in self.next_timers:
            if item[1].called:
                self.timers_canceled -= 1
            else:
                heappush(t, item)
        del self.next_timers[:]

    def schedule_call_local(self, seconds, cb, *args, **kw):
        """Schedule a callable to be called after 'seconds' seconds have
        elapsed. Cancel the timer if greenlet has exited.
            seconds: The number of seconds to wait.
            cb: The callable to call after the given time.
            *args: Arguments to pass to the callable when called.
            **kw: Keyword arguments to pass to the callable when called.
        """
        t = timer.LocalTimer(seconds, cb, *args, **kw)
        self.add_timer(t)
        return t

    def schedule_call_global(self, seconds, cb, *args, **kw):
        """Schedule a callable to be called after 'seconds' seconds have
        elapsed. The timer will NOT be canceled if the current greenlet has
        exited before the timer fires.
            seconds: The number of seconds to wait.
            cb: The callable to call after the given time.
            *args: Arguments to pass to the callable when called.
            **kw: Keyword arguments to pass to the callable when called.
        """
        t = timer.Timer(seconds, cb, *args, **kw)
        self.add_timer(t)
        return t

    def fire_timers(self, when):
        t = self.timers
        heappop = heapq.heappop

        while t:
            next = t[0]

            exp = next[0]
            timer = next[1]

            if when < exp:
                break

            heappop(t)

            try:
                if timer.called:
                    self.timers_canceled -= 1
                else:
                    timer()
            except self.SYSTEM_EXCEPTIONS:
                raise
            except:
                self.squelch_timer_exception(timer, sys.exc_info())

    # for debugging:

    def get_readers(self):
        return self.listeners[READ].values()

    def get_writers(self):
        return self.listeners[WRITE].values()

    def get_timers_count(hub):
        return len(hub.timers) + len(hub.next_timers)

    def set_debug_listeners(self, value):
        if value:
            self.lclass = DebugListener
        else:
            self.lclass = FdListener

    def set_timer_exceptions(self, value):
        self.debug_exceptions = value
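Note: the schedule_call_local()/schedule_call_global() docstrings above differ only in whether the timer survives the greenthread that scheduled it; a small sketch of that difference, assuming eventlet is importable:

    # Illustrative sketch only, not part of the commit.
    import eventlet
    from eventlet import hubs

    def schedule_both():
        h = hubs.get_hub()
        h.schedule_call_global(0.1, print, "global timer fired")
        h.schedule_call_local(0.1, print, "local timer fired")  # dropped once this greenthread exits

    eventlet.spawn(schedule_both).wait()
    eventlet.sleep(0.3)  # only the global timer's message is printed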
110  venv/lib/python3.12/site-packages/eventlet/hubs/kqueue.py  Normal file
@@ -0,0 +1,110 @@
import os
import sys
from eventlet import patcher, support
from eventlet.hubs import hub
select = patcher.original('select')
time = patcher.original('time')


def is_available():
    return hasattr(select, 'kqueue')


class Hub(hub.BaseHub):
    MAX_EVENTS = 100

    def __init__(self, clock=None):
        self.FILTERS = {
            hub.READ: select.KQ_FILTER_READ,
            hub.WRITE: select.KQ_FILTER_WRITE,
        }
        super().__init__(clock)
        self._events = {}
        self._init_kqueue()

    def _init_kqueue(self):
        self.kqueue = select.kqueue()
        self._pid = os.getpid()

    def _reinit_kqueue(self):
        self.kqueue.close()
        self._init_kqueue()
        events = [e for i in self._events.values()
                  for e in i.values()]
        self.kqueue.control(events, 0, 0)

    def _control(self, events, max_events, timeout):
        try:
            return self.kqueue.control(events, max_events, timeout)
        except OSError:
            # have we forked?
            if os.getpid() != self._pid:
                self._reinit_kqueue()
                return self.kqueue.control(events, max_events, timeout)
            raise

    def add(self, evtype, fileno, cb, tb, mac):
        listener = super().add(evtype, fileno, cb, tb, mac)
        events = self._events.setdefault(fileno, {})
        if evtype not in events:
            try:
                event = select.kevent(fileno, self.FILTERS.get(evtype), select.KQ_EV_ADD)
                self._control([event], 0, 0)
                events[evtype] = event
            except ValueError:
                super().remove(listener)
                raise
        return listener

    def _delete_events(self, events):
        del_events = [
            select.kevent(e.ident, e.filter, select.KQ_EV_DELETE)
            for e in events
        ]
        self._control(del_events, 0, 0)

    def remove(self, listener):
        super().remove(listener)
        evtype = listener.evtype
        fileno = listener.fileno
        if not self.listeners[evtype].get(fileno):
            event = self._events[fileno].pop(evtype, None)
            if event is None:
                return
            try:
                self._delete_events((event,))
            except OSError:
                pass

    def remove_descriptor(self, fileno):
        super().remove_descriptor(fileno)
        try:
            events = self._events.pop(fileno).values()
            self._delete_events(events)
        except KeyError:
            pass
        except OSError:
            pass

    def wait(self, seconds=None):
        readers = self.listeners[self.READ]
        writers = self.listeners[self.WRITE]

        if not readers and not writers:
            if seconds:
                time.sleep(seconds)
            return
        result = self._control([], self.MAX_EVENTS, seconds)
        SYSTEM_EXCEPTIONS = self.SYSTEM_EXCEPTIONS
        for event in result:
            fileno = event.ident
            evfilt = event.filter
            try:
                if evfilt == select.KQ_FILTER_READ:
                    readers.get(fileno, hub.noop).cb(fileno)
                if evfilt == select.KQ_FILTER_WRITE:
                    writers.get(fileno, hub.noop).cb(fileno)
            except SYSTEM_EXCEPTIONS:
                raise
            except:
                self.squelch_exception(fileno, sys.exc_info())
118  venv/lib/python3.12/site-packages/eventlet/hubs/poll.py  Normal file
@@ -0,0 +1,118 @@
import errno
import sys

from eventlet import patcher, support
from eventlet.hubs import hub
select = patcher.original('select')
time = patcher.original('time')


def is_available():
    return hasattr(select, 'poll')


class Hub(hub.BaseHub):
    def __init__(self, clock=None):
        super().__init__(clock)
        self.EXC_MASK = select.POLLERR | select.POLLHUP
        self.READ_MASK = select.POLLIN | select.POLLPRI
        self.WRITE_MASK = select.POLLOUT
        self.poll = select.poll()

    def add(self, evtype, fileno, cb, tb, mac):
        listener = super().add(evtype, fileno, cb, tb, mac)
        self.register(fileno, new=True)
        return listener

    def remove(self, listener):
        super().remove(listener)
        self.register(listener.fileno)

    def register(self, fileno, new=False):
        mask = 0
        if self.listeners[self.READ].get(fileno):
            mask |= self.READ_MASK | self.EXC_MASK
        if self.listeners[self.WRITE].get(fileno):
            mask |= self.WRITE_MASK | self.EXC_MASK
        try:
            if mask:
                if new:
                    self.poll.register(fileno, mask)
                else:
                    try:
                        self.poll.modify(fileno, mask)
                    except OSError:
                        self.poll.register(fileno, mask)
            else:
                try:
                    self.poll.unregister(fileno)
                except (KeyError, OSError):
                    # raised if we try to remove a fileno that was
                    # already removed/invalid
                    pass
        except ValueError:
            # fileno is bad, issue 74
            self.remove_descriptor(fileno)
            raise

    def remove_descriptor(self, fileno):
        super().remove_descriptor(fileno)
        try:
            self.poll.unregister(fileno)
        except (KeyError, ValueError, OSError):
            # raised if we try to remove a fileno that was
            # already removed/invalid
            pass

    def do_poll(self, seconds):
        # poll.poll expects integral milliseconds
        return self.poll.poll(int(seconds * 1000.0))

    def wait(self, seconds=None):
        readers = self.listeners[self.READ]
        writers = self.listeners[self.WRITE]

        if not readers and not writers:
            if seconds:
                time.sleep(seconds)
            return
        try:
            presult = self.do_poll(seconds)
        except OSError as e:
            if support.get_errno(e) == errno.EINTR:
                return
            raise
        SYSTEM_EXCEPTIONS = self.SYSTEM_EXCEPTIONS

        if self.debug_blocking:
            self.block_detect_pre()

        # Accumulate the listeners to call back to prior to
        # triggering any of them. This is to keep the set
        # of callbacks in sync with the events we've just
        # polled for. It prevents one handler from invalidating
        # another.
        callbacks = set()
        noop = hub.noop  # shave getattr
        for fileno, event in presult:
            if event & self.READ_MASK:
                callbacks.add((readers.get(fileno, noop), fileno))
            if event & self.WRITE_MASK:
                callbacks.add((writers.get(fileno, noop), fileno))
            if event & select.POLLNVAL:
                self.remove_descriptor(fileno)
                continue
            if event & self.EXC_MASK:
                callbacks.add((readers.get(fileno, noop), fileno))
                callbacks.add((writers.get(fileno, noop), fileno))

        for listener, fileno in callbacks:
            try:
                listener.cb(fileno)
            except SYSTEM_EXCEPTIONS:
                raise
            except:
                self.squelch_exception(fileno, sys.exc_info())

        if self.debug_blocking:
            self.block_detect_post()
@@ -0,0 +1,4 @@
raise ImportError(
    "Eventlet pyevent hub was removed because it was not maintained."
    " Try version 0.22.1 or older. Sorry for the inconvenience."
)
63  venv/lib/python3.12/site-packages/eventlet/hubs/selects.py  Normal file
@@ -0,0 +1,63 @@
import errno
import sys
from eventlet import patcher, support
from eventlet.hubs import hub
select = patcher.original('select')
time = patcher.original('time')

try:
    BAD_SOCK = {errno.EBADF, errno.WSAENOTSOCK}
except AttributeError:
    BAD_SOCK = {errno.EBADF}


def is_available():
    return hasattr(select, 'select')


class Hub(hub.BaseHub):
    def _remove_bad_fds(self):
        """ Iterate through fds, removing the ones that are bad per the
        operating system.
        """
        all_fds = list(self.listeners[self.READ]) + list(self.listeners[self.WRITE])
        for fd in all_fds:
            try:
                select.select([fd], [], [], 0)
            except OSError as e:
                if support.get_errno(e) in BAD_SOCK:
                    self.remove_descriptor(fd)

    def wait(self, seconds=None):
        readers = self.listeners[self.READ]
        writers = self.listeners[self.WRITE]
        if not readers and not writers:
            if seconds:
                time.sleep(seconds)
            return
        reader_fds = list(readers)
        writer_fds = list(writers)
        all_fds = reader_fds + writer_fds
        try:
            r, w, er = select.select(reader_fds, writer_fds, all_fds, seconds)
        except OSError as e:
            if support.get_errno(e) == errno.EINTR:
                return
            elif support.get_errno(e) in BAD_SOCK:
                self._remove_bad_fds()
                return
            else:
                raise

        for fileno in er:
            readers.get(fileno, hub.noop).cb(fileno)
            writers.get(fileno, hub.noop).cb(fileno)

        for listeners, events in ((readers, r), (writers, w)):
            for fileno in events:
                try:
                    listeners.get(fileno, hub.noop).cb(fileno)
                except self.SYSTEM_EXCEPTIONS:
                    raise
                except:
                    self.squelch_exception(fileno, sys.exc_info())
106  venv/lib/python3.12/site-packages/eventlet/hubs/timer.py  Normal file
@@ -0,0 +1,106 @@
import traceback

import eventlet.hubs
from eventlet.support import greenlets as greenlet
import io

""" If true, captures a stack trace for each timer when constructed. This is
useful for debugging leaking timers, to find out where the timer was set up. """
_g_debug = False


class Timer:
    def __init__(self, seconds, cb, *args, **kw):
        """Create a timer.
            seconds: The minimum number of seconds to wait before calling
            cb: The callback to call when the timer has expired
            *args: The arguments to pass to cb
            **kw: The keyword arguments to pass to cb

        This timer will not be run unless it is scheduled in a runloop by
        calling timer.schedule() or runloop.add_timer(timer).
        """
        self.seconds = seconds
        self.tpl = cb, args, kw
        self.called = False
        if _g_debug:
            self.traceback = io.StringIO()
            traceback.print_stack(file=self.traceback)

    @property
    def pending(self):
        return not self.called

    def __repr__(self):
        secs = getattr(self, 'seconds', None)
        cb, args, kw = getattr(self, 'tpl', (None, None, None))
        retval = "Timer(%s, %s, *%s, **%s)" % (
            secs, cb, args, kw)
        if _g_debug and hasattr(self, 'traceback'):
            retval += '\n' + self.traceback.getvalue()
        return retval

    def copy(self):
        cb, args, kw = self.tpl
        return self.__class__(self.seconds, cb, *args, **kw)

    def schedule(self):
        """Schedule this timer to run in the current runloop.
        """
        self.called = False
        self.scheduled_time = eventlet.hubs.get_hub().add_timer(self)
        return self

    def __call__(self, *args):
        if not self.called:
            self.called = True
            cb, args, kw = self.tpl
            try:
                cb(*args, **kw)
            finally:
                try:
                    del self.tpl
                except AttributeError:
                    pass

    def cancel(self):
        """Prevent this timer from being called. If the timer has already
        been called or canceled, has no effect.
        """
        if not self.called:
            self.called = True
            eventlet.hubs.get_hub().timer_canceled(self)
            try:
                del self.tpl
            except AttributeError:
                pass

    # No default ordering in 3.x. heapq uses <
    # FIXME should full set be added?
    def __lt__(self, other):
        return id(self) < id(other)


class LocalTimer(Timer):

    def __init__(self, *args, **kwargs):
        self.greenlet = greenlet.getcurrent()
        Timer.__init__(self, *args, **kwargs)

    @property
    def pending(self):
        if self.greenlet is None or self.greenlet.dead:
            return False
        return not self.called

    def __call__(self, *args):
        if not self.called:
            self.called = True
            if self.greenlet is not None and self.greenlet.dead:
                return
            cb, args, kw = self.tpl
            cb(*args, **kw)

    def cancel(self):
        self.greenlet = None
        Timer.cancel(self)
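Note: the hub pushes (scheduled_time, timer) tuples onto its heap, so Timer.__lt__ above only matters when two timers share the exact same scheduled time; a small sketch of that tie-break, assuming eventlet is importable:

    # Illustrative sketch only, not part of the commit.
    import heapq
    from eventlet.hubs.timer import Timer

    heap = []
    heapq.heappush(heap, (5.0, Timer(5.0, print, "a")))
    heapq.heappush(heap, (5.0, Timer(5.0, print, "b")))  # equal times -> heapq falls back to Timer.__lt__
    print(heapq.heappop(heap)[1])                         # repr of whichever Timer has the smaller id()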