API refactor

2025-10-07 16:25:52 +09:00
parent 76d0d86211
commit 91c7e04474
1171 changed files with 81940 additions and 44117 deletions

venv/lib/python3.12/site-packages/redis/client.py Normal file → Executable file

@@ -2,9 +2,19 @@ import copy
import re
import threading
import time
import warnings
from itertools import chain
from typing import Any, Callable, Dict, List, Optional, Type, Union
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Mapping,
Optional,
Set,
Type,
Union,
)
from redis._parsers.encoders import Encoder
from redis._parsers.helpers import (
@@ -13,33 +23,54 @@ from redis._parsers.helpers import (
_RedisCallbacksRESP3,
bool_ok,
)
from redis.backoff import ExponentialWithJitterBackoff
from redis.cache import CacheConfig, CacheInterface
from redis.commands import (
CoreCommands,
RedisModuleCommands,
SentinelCommands,
list_or_args,
)
from redis.connection import ConnectionPool, SSLConnection, UnixDomainSocketConnection
from redis.commands.core import Script
from redis.connection import (
AbstractConnection,
Connection,
ConnectionPool,
SSLConnection,
UnixDomainSocketConnection,
)
from redis.credentials import CredentialProvider
from redis.event import (
AfterPooledConnectionsInstantiationEvent,
AfterPubSubConnectionInstantiationEvent,
AfterSingleConnectionInstantiationEvent,
ClientType,
EventDispatcher,
)
from redis.exceptions import (
ConnectionError,
ExecAbortError,
PubSubError,
RedisError,
ResponseError,
TimeoutError,
WatchError,
)
from redis.lock import Lock
from redis.retry import Retry
from redis.utils import (
HIREDIS_AVAILABLE,
_set_info_logger,
deprecated_args,
get_lib_version,
safe_str,
str_if_bytes,
truncate_text,
)
if TYPE_CHECKING:
import ssl
import OpenSSL
SYM_EMPTY = b""
EMPTY_RESPONSE = "EMPTY_RESPONSE"
@@ -94,7 +125,7 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
"""
@classmethod
def from_url(cls, url: str, **kwargs) -> None:
def from_url(cls, url: str, **kwargs) -> "Redis":
"""
Return a Redis client object configured from the given URL
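
A quick usage sketch of the corrected from_url annotation; the URL, key, and value are illustrative, and a reachable local Redis server is assumed:

import redis

client = redis.Redis.from_url("redis://localhost:6379/0", decode_responses=True)
client.set("greeting", "hello")   # True on success
print(client.get("greeting"))     # "hello" (str, because decode_responses=True)
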
@@ -160,56 +191,80 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
client.auto_close_connection_pool = True
return client
@deprecated_args(
args_to_warn=["retry_on_timeout"],
reason="TimeoutError is included by default.",
version="6.0.0",
)
def __init__(
self,
host="localhost",
port=6379,
db=0,
password=None,
socket_timeout=None,
socket_connect_timeout=None,
socket_keepalive=None,
socket_keepalive_options=None,
connection_pool=None,
unix_socket_path=None,
encoding="utf-8",
encoding_errors="strict",
charset=None,
errors=None,
decode_responses=False,
retry_on_timeout=False,
retry_on_error=None,
ssl=False,
ssl_keyfile=None,
ssl_certfile=None,
ssl_cert_reqs="required",
ssl_ca_certs=None,
ssl_ca_path=None,
ssl_ca_data=None,
ssl_check_hostname=False,
ssl_password=None,
ssl_validate_ocsp=False,
ssl_validate_ocsp_stapled=False,
ssl_ocsp_context=None,
ssl_ocsp_expected_cert=None,
max_connections=None,
single_connection_client=False,
health_check_interval=0,
client_name=None,
lib_name="redis-py",
lib_version=get_lib_version(),
username=None,
retry=None,
redis_connect_func=None,
host: str = "localhost",
port: int = 6379,
db: int = 0,
password: Optional[str] = None,
socket_timeout: Optional[float] = None,
socket_connect_timeout: Optional[float] = None,
socket_keepalive: Optional[bool] = None,
socket_keepalive_options: Optional[Mapping[int, Union[int, bytes]]] = None,
connection_pool: Optional[ConnectionPool] = None,
unix_socket_path: Optional[str] = None,
encoding: str = "utf-8",
encoding_errors: str = "strict",
decode_responses: bool = False,
retry_on_timeout: bool = False,
retry: Retry = Retry(
backoff=ExponentialWithJitterBackoff(base=1, cap=10), retries=3
),
retry_on_error: Optional[List[Type[Exception]]] = None,
ssl: bool = False,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
ssl_cert_reqs: Union[str, "ssl.VerifyMode"] = "required",
ssl_ca_certs: Optional[str] = None,
ssl_ca_path: Optional[str] = None,
ssl_ca_data: Optional[str] = None,
ssl_check_hostname: bool = True,
ssl_password: Optional[str] = None,
ssl_validate_ocsp: bool = False,
ssl_validate_ocsp_stapled: bool = False,
ssl_ocsp_context: Optional["OpenSSL.SSL.Context"] = None,
ssl_ocsp_expected_cert: Optional[str] = None,
ssl_min_version: Optional["ssl.TLSVersion"] = None,
ssl_ciphers: Optional[str] = None,
max_connections: Optional[int] = None,
single_connection_client: bool = False,
health_check_interval: int = 0,
client_name: Optional[str] = None,
lib_name: Optional[str] = "redis-py",
lib_version: Optional[str] = get_lib_version(),
username: Optional[str] = None,
redis_connect_func: Optional[Callable[[], None]] = None,
credential_provider: Optional[CredentialProvider] = None,
protocol: Optional[int] = 2,
cache: Optional[CacheInterface] = None,
cache_config: Optional[CacheConfig] = None,
event_dispatcher: Optional[EventDispatcher] = None,
) -> None:
"""
Initialize a new Redis client.
To specify a retry policy for specific errors, first set
`retry_on_error` to a list of the error/s to retry on, then set
`retry` to a valid `Retry` object.
To retry on TimeoutError, `retry_on_timeout` can also be set to `True`.
To specify a retry policy for specific errors, you have two options:
1. Set `retry_on_error` to a list of the errors to retry on, and
optionally set `retry` to a valid `Retry` object (if the default one
is not appropriate) - with this approach the retries will be triggered
on the default errors specified in the Retry object, enriched with the
errors specified in `retry_on_error`.
2. Define a `Retry` object with a configured 'supported_errors' and pass
it as the `retry` parameter - with this approach you completely redefine
the errors on which retries will happen.
`retry_on_timeout` is deprecated - please include the TimeoutError
either in the Retry object or in the `retry_on_error` list.
When 'connection_pool' is provided, the retry configuration of the
provided pool will be used.
Args:
@@ -217,25 +272,13 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
if `True`, connection pool is not used. In that case `Redis`
instance use is not thread safe.
"""
if event_dispatcher is None:
self._event_dispatcher = EventDispatcher()
else:
self._event_dispatcher = event_dispatcher
if not connection_pool:
if charset is not None:
warnings.warn(
DeprecationWarning(
'"charset" is deprecated. Use "encoding" instead'
)
)
encoding = charset
if errors is not None:
warnings.warn(
DeprecationWarning(
'"errors" is deprecated. Use "encoding_errors" instead'
)
)
encoding_errors = errors
if not retry_on_error:
retry_on_error = []
if retry_on_timeout is True:
retry_on_error.append(TimeoutError)
kwargs = {
"db": db,
"username": username,
@@ -291,17 +334,50 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
"ssl_validate_ocsp": ssl_validate_ocsp,
"ssl_ocsp_context": ssl_ocsp_context,
"ssl_ocsp_expected_cert": ssl_ocsp_expected_cert,
"ssl_min_version": ssl_min_version,
"ssl_ciphers": ssl_ciphers,
}
)
if (cache_config or cache) and protocol in [3, "3"]:
kwargs.update(
{
"cache": cache,
"cache_config": cache_config,
}
)
connection_pool = ConnectionPool(**kwargs)
self._event_dispatcher.dispatch(
AfterPooledConnectionsInstantiationEvent(
[connection_pool], ClientType.SYNC, credential_provider
)
)
self.auto_close_connection_pool = True
else:
self.auto_close_connection_pool = False
self._event_dispatcher.dispatch(
AfterPooledConnectionsInstantiationEvent(
[connection_pool], ClientType.SYNC, credential_provider
)
)
self.connection_pool = connection_pool
if (cache_config or cache) and self.connection_pool.get_protocol() not in [
3,
"3",
]:
raise RedisError("Client caching is only supported with RESP version 3")
self.single_connection_lock = threading.RLock()
self.connection = None
if single_connection_client:
self.connection = self.connection_pool.get_connection("_")
self._single_connection_client = single_connection_client
if self._single_connection_client:
self.connection = self.connection_pool.get_connection()
self._event_dispatcher.dispatch(
AfterSingleConnectionInstantiationEvent(
self.connection, ClientType.SYNC, self.single_connection_lock
)
)
self.response_callbacks = CaseInsensitiveDict(_RedisCallbacks)
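
The retry behavior documented in the new __init__ docstring could be exercised as sketched below; the backoff parameters, retry counts, and error classes are illustrative, and ExponentialWithJitterBackoff is assumed to be available in this redis-py build:

from redis import Redis
from redis.backoff import ExponentialWithJitterBackoff
from redis.exceptions import BusyLoadingError, ConnectionError, TimeoutError
from redis.retry import Retry

# Option 1: keep the default Retry and enrich its errors via retry_on_error.
r1 = Redis(host="localhost", port=6379, retry_on_error=[BusyLoadingError])

# Option 2: fully redefine the retried errors with supported_errors.
custom_retry = Retry(
    backoff=ExponentialWithJitterBackoff(base=1, cap=10),
    retries=5,
    supported_errors=(ConnectionError, TimeoutError, BusyLoadingError),
)
r2 = Redis(host="localhost", port=6379, retry=custom_retry)
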
@@ -311,7 +387,10 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
self.response_callbacks.update(_RedisCallbacksRESP2)
def __repr__(self) -> str:
return f"{type(self).__name__}<{repr(self.connection_pool)}>"
return (
f"<{type(self).__module__}.{type(self).__name__}"
f"({repr(self.connection_pool)})>"
)
def get_encoder(self) -> "Encoder":
"""Get the connection pool's encoder"""
@@ -321,10 +400,10 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
"""Get the connection's key-word arguments"""
return self.connection_pool.connection_kwargs
def get_retry(self) -> Optional["Retry"]:
def get_retry(self) -> Optional[Retry]:
return self.get_connection_kwargs().get("retry")
def set_retry(self, retry: "Retry") -> None:
def set_retry(self, retry: Retry) -> None:
self.get_connection_kwargs().update({"retry": retry})
self.connection_pool.set_retry(retry)
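
get_retry and set_retry also allow swapping the policy on an existing client at runtime; a brief sketch (the backoff choice is illustrative):

from redis import Redis
from redis.backoff import ExponentialBackoff
from redis.retry import Retry

client = Redis()
print(client.get_retry())                         # Retry stored in the connection kwargs
client.set_retry(Retry(ExponentialBackoff(), 3))  # also propagated to the connection pool
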
@@ -369,7 +448,7 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
def transaction(
self, func: Callable[["Pipeline"], None], *watches, **kwargs
) -> None:
) -> Union[List[Any], Any, None]:
"""
Convenience method for executing the callable `func` as a transaction
while watching all keys specified in `watches`. The 'func' callable
@@ -400,6 +479,7 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
blocking_timeout: Optional[float] = None,
lock_class: Union[None, Any] = None,
thread_local: bool = True,
raise_on_release_error: bool = True,
):
"""
Return a new Lock object using key ``name`` that mimics
@@ -446,6 +526,11 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
thread-1 would see the token value as "xyz" and would be
able to successfully release the thread-2's lock.
``raise_on_release_error`` indicates whether to raise an exception if
the lock is no longer owned when exiting the context manager. By default,
this is True, meaning an exception will be raised. If False, a warning
will be logged and the exception will be suppressed.
In some use cases it's necessary to disable thread local storage. For
example, if you have code where one thread acquires a lock and passes
that lock instance to a worker thread to release later. If thread
@@ -463,6 +548,7 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
blocking=blocking,
blocking_timeout=blocking_timeout,
thread_local=thread_local,
raise_on_release_error=raise_on_release_error,
)
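
A sketch of the new raise_on_release_error flag; the lock name, timeouts, and the work inside the block are illustrative:

from redis import Redis

client = Redis()
# With raise_on_release_error=False, a lock that has expired or been taken over
# by the time the block exits is logged as a warning instead of raising.
with client.lock("resource:42", timeout=5, blocking_timeout=2,
                 raise_on_release_error=False):
    client.incr("resource:42:work")  # stand-in for the critical section
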
def pubsub(self, **kwargs):
@@ -471,7 +557,9 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
subscribe to channels and listen for messages that get published to
them.
"""
return PubSub(self.connection_pool, **kwargs)
return PubSub(
self.connection_pool, event_dispatcher=self._event_dispatcher, **kwargs
)
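
Pub/Sub usage is unchanged by the event-dispatcher wiring; a short sketch with an illustrative channel name:

from redis import Redis

client = Redis()
pubsub = client.pubsub(ignore_subscribe_messages=True)
pubsub.subscribe("news")
client.publish("news", "hello")
print(pubsub.get_message(timeout=1.0))  # {'type': 'message', 'channel': b'news', ...} or None
pubsub.close()
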
def monitor(self):
return Monitor(self.connection_pool)
@@ -488,9 +576,12 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
self.close()
def __del__(self):
self.close()
try:
self.close()
except Exception:
pass
def close(self):
def close(self) -> None:
# In case a connection property does not yet exist
# (due to a crash earlier in the Redis() constructor), return
# immediately as there is nothing to clean-up.
@@ -509,37 +600,44 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
"""
Send a command and parse the response
"""
conn.send_command(*args)
conn.send_command(*args, **options)
return self.parse_response(conn, command_name, **options)
def _disconnect_raise(self, conn, error):
def _close_connection(self, conn) -> None:
"""
Close the connection and raise an exception
if retry_on_error is not set or the error
is not one of the specified error types
Close the connection before retrying.
The supported exceptions are already checked in the
retry object, so we don't need to check them here.
After we disconnect the connection, it will try to reconnect and
do a health check as part of the send_command logic (at the connection level).
"""
conn.disconnect()
if (
conn.retry_on_error is None
or isinstance(error, tuple(conn.retry_on_error)) is False
):
raise error
# COMMAND EXECUTION AND PROTOCOL PARSING
def execute_command(self, *args, **options):
return self._execute_command(*args, **options)
def _execute_command(self, *args, **options):
"""Execute a command and return a parsed response"""
pool = self.connection_pool
command_name = args[0]
conn = self.connection or pool.get_connection(command_name, **options)
conn = self.connection or pool.get_connection()
if self._single_connection_client:
self.single_connection_lock.acquire()
try:
return conn.retry.call_with_retry(
lambda: self._send_command_parse_response(
conn, command_name, *args, **options
),
lambda error: self._disconnect_raise(conn, error),
lambda _: self._close_connection(conn),
)
finally:
if self._single_connection_client:
self.single_connection_lock.release()
if not self.connection:
pool.release(conn)
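
From the caller's perspective _execute_command behaves as before: the connection's Retry object drives reconnection, and the error handler above only disconnects. An illustrative raw-command call:

from redis import Redis

client = Redis()
client.execute_command("SET", "counter", 0)
print(client.execute_command("INCRBY", "counter", 5))  # 5; retries happen transparently
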
@@ -559,10 +657,16 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
if EMPTY_RESPONSE in options:
options.pop(EMPTY_RESPONSE)
# Remove the "keys" entry; it is only needed for the cache.
options.pop("keys", None)
if command_name in self.response_callbacks:
return self.response_callbacks[command_name](response, **options)
return response
def get_cache(self) -> Optional[CacheInterface]:
return self.connection_pool.cache
StrictRedis = Redis
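
Client-side caching, which the constructor above restricts to RESP3, could be enabled roughly as follows; this assumes a server and redis-py build with RESP3 caching support, and uses CacheConfig defaults:

from redis import Redis
from redis.cache import CacheConfig

client = Redis(protocol=3, cache_config=CacheConfig())
client.set("profile:1", "alice")
client.get("profile:1")    # first read goes to the server and populates the cache
client.get("profile:1")    # repeat read can be served from the client-side cache
print(client.get_cache())  # CacheInterface instance held by the connection pool
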
@@ -579,7 +683,7 @@ class Monitor:
def __init__(self, connection_pool):
self.connection_pool = connection_pool
self.connection = self.connection_pool.get_connection("MONITOR")
self.connection = self.connection_pool.get_connection()
def __enter__(self):
self.connection.send_command("MONITOR")
@@ -654,6 +758,7 @@ class PubSub:
ignore_subscribe_messages: bool = False,
encoder: Optional["Encoder"] = None,
push_handler_func: Union[None, Callable[[str], None]] = None,
event_dispatcher: Optional["EventDispatcher"] = None,
):
self.connection_pool = connection_pool
self.shard_hint = shard_hint
@@ -664,6 +769,12 @@ class PubSub:
# to look up channel and pattern names for callback handlers.
self.encoder = encoder
self.push_handler_func = push_handler_func
if event_dispatcher is None:
self._event_dispatcher = EventDispatcher()
else:
self._event_dispatcher = event_dispatcher
self._lock = threading.RLock()
if self.encoder is None:
self.encoder = self.connection_pool.get_encoder()
self.health_check_response_b = self.encoder.encode(self.HEALTH_CHECK_MESSAGE)
@@ -693,7 +804,7 @@ class PubSub:
def reset(self) -> None:
if self.connection:
self.connection.disconnect()
self.connection._deregister_connect_callback(self.on_connect)
self.connection.deregister_connect_callback(self.on_connect)
self.connection_pool.release(self.connection)
self.connection = None
self.health_check_response_counter = 0
@@ -746,19 +857,23 @@ class PubSub:
# subscribed to one or more channels
if self.connection is None:
self.connection = self.connection_pool.get_connection(
"pubsub", self.shard_hint
)
self.connection = self.connection_pool.get_connection()
# register a callback that re-subscribes to any channels we
# were listening to when we were disconnected
self.connection._register_connect_callback(self.on_connect)
if self.push_handler_func is not None and not HIREDIS_AVAILABLE:
self.connection._parser.set_push_handler(self.push_handler_func)
self.connection.register_connect_callback(self.on_connect)
if self.push_handler_func is not None:
self.connection._parser.set_pubsub_push_handler(self.push_handler_func)
self._event_dispatcher.dispatch(
AfterPubSubConnectionInstantiationEvent(
self.connection, self.connection_pool, ClientType.SYNC, self._lock
)
)
connection = self.connection
kwargs = {"check_health": not self.subscribed}
if not self.subscribed:
self.clean_health_check_responses()
self._execute(connection, connection.send_command, *args, **kwargs)
with self._lock:
self._execute(connection, connection.send_command, *args, **kwargs)
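
Subscription commands now fetch the pool connection lazily, register the re-subscribe callback, and send under the instance lock; callers interact with PubSub exactly as before. A sketch with an illustrative pattern:

from redis import Redis

client = Redis()
pubsub = client.pubsub()
pubsub.psubscribe("sensor.*")               # connection is acquired on this first command
client.publish("sensor.temp", "21.5")
for _ in range(3):
    print(pubsub.get_message(timeout=0.5))  # psubscribe confirmation, then messages or None
pubsub.punsubscribe("sensor.*")
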
def clean_health_check_responses(self) -> None:
"""
@@ -774,19 +889,18 @@ class PubSub:
else:
raise PubSubError(
"A non health check response was cleaned by "
"execute_command: {0}".format(response)
"execute_command: {}".format(response)
)
ttl -= 1
def _disconnect_raise_connect(self, conn, error) -> None:
def _reconnect(self, conn) -> None:
"""
Close the connection and raise an exception
if retry_on_timeout is not set or the error
is not a TimeoutError. Otherwise, try to reconnect
The supported exceptions are already checked in the
retry object, so we don't need to check them here.
In this error handler we simply try to reconnect to the server.
"""
conn.disconnect()
if not (conn.retry_on_timeout and isinstance(error, TimeoutError)):
raise error
conn.connect()
def _execute(self, conn, command, *args, **kwargs):
@@ -799,7 +913,7 @@ class PubSub:
"""
return conn.retry.call_with_retry(
lambda: command(*args, **kwargs),
lambda error: self._disconnect_raise_connect(conn, error),
lambda _: self._reconnect(conn),
)
def parse_response(self, block=True, timeout=0):
@@ -848,7 +962,7 @@ class PubSub:
"did you forget to call subscribe() or psubscribe()?"
)
if conn.health_check_interval and time.time() > conn.next_health_check:
if conn.health_check_interval and time.monotonic() > conn.next_health_check:
conn.send_command("PING", self.HEALTH_CHECK_MESSAGE, check_health=False)
self.health_check_response_counter += 1
@@ -998,12 +1112,12 @@ class PubSub:
"""
if not self.subscribed:
# Wait for subscription
start_time = time.time()
start_time = time.monotonic()
if self.subscribed_event.wait(timeout) is True:
# The connection was subscribed during the timeout time frame.
# The timeout should be adjusted based on the time spent
# waiting for the subscription
time_spent = time.time() - start_time
time_spent = time.monotonic() - start_time
timeout = max(0.0, timeout - time_spent)
else:
# The connection isn't subscribed to any channels or patterns,
@@ -1100,7 +1214,7 @@ class PubSub:
def run_in_thread(
self,
sleep_time: int = 0,
sleep_time: float = 0.0,
daemon: bool = False,
exception_handler: Optional[Callable] = None,
) -> "PubSubWorkerThread":
@@ -1168,7 +1282,8 @@ class Pipeline(Redis):
in one transmission. This is convenient for batch processing, such as
saving all the values in a list to Redis.
All commands executed within a pipeline are wrapped with MULTI and EXEC
All commands executed within a pipeline (when running in transactional mode,
which is the default behavior) are wrapped with MULTI and EXEC
calls. This guarantees all commands executed in the pipeline will be
executed atomically.
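
A sketch contrasting the default transactional pipeline with a non-transactional batch; keys and values are illustrative:

from redis import Redis

client = Redis()

# Default: commands are queued and wrapped in MULTI/EXEC, so they apply atomically.
pipe = client.pipeline()          # transaction=True by default
pipe.set("a", 1).incr("a").get("a")
print(pipe.execute())             # [True, 2, b'2']

# transaction=False still batches commands into one round trip, without MULTI/EXEC.
batch = client.pipeline(transaction=False)
batch.set("b", 1)
batch.get("b")
print(batch.execute())            # [True, b'1']
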
@@ -1183,15 +1298,22 @@ class Pipeline(Redis):
UNWATCH_COMMANDS = {"DISCARD", "EXEC", "UNWATCH"}
def __init__(self, connection_pool, response_callbacks, transaction, shard_hint):
def __init__(
self,
connection_pool: ConnectionPool,
response_callbacks,
transaction,
shard_hint,
):
self.connection_pool = connection_pool
self.connection = None
self.connection: Optional[Connection] = None
self.response_callbacks = response_callbacks
self.transaction = transaction
self.shard_hint = shard_hint
self.watching = False
self.reset()
self.command_stack = []
self.scripts: Set[Script] = set()
self.explicit_transaction = False
def __enter__(self) -> "Pipeline":
return self
@@ -1257,47 +1379,51 @@ class Pipeline(Redis):
return self.immediate_execute_command(*args, **kwargs)
return self.pipeline_execute_command(*args, **kwargs)
def _disconnect_reset_raise(self, conn, error) -> None:
def _disconnect_reset_raise_on_watching(
self,
conn: AbstractConnection,
error: Exception,
) -> None:
"""
Close the connection, reset watching state and
raise an exception if we were watching,
retry_on_timeout is not set,
or the error is not a TimeoutError
Close the connection, reset the watching state, and
raise an exception if we were watching.
The supported exceptions are already checked in the
retry object, so we don't need to check them here.
After we disconnect the connection, it will try to reconnect and
do a health check as part of the send_command logic (at the connection level).
conn.disconnect()
# if we were already watching a variable, the watch is no longer
# valid since this connection has died. raise a WatchError, which
# indicates the user should retry this transaction.
if self.watching:
self.reset()
raise WatchError(
"A ConnectionError occurred on while watching one or more keys"
f"A {type(error).__name__} occurred while watching one or more keys"
)
# if retry_on_timeout is not set, or the error is not
# a TimeoutError, raise it
if not (conn.retry_on_timeout and isinstance(error, TimeoutError)):
self.reset()
raise
def immediate_execute_command(self, *args, **options):
"""
Execute a command immediately, but don't auto-retry on a
ConnectionError if we're already WATCHing a variable. Used when
issuing WATCH or subsequent commands retrieving their values but before
Execute a command immediately, but don't auto-retry on the
retry-supported errors if we're already WATCHing a variable.
Used when issuing WATCH or subsequent commands that retrieve their values, but before
MULTI is called.
"""
command_name = args[0]
conn = self.connection
# if this is the first call, we need a connection
if not conn:
conn = self.connection_pool.get_connection(command_name, self.shard_hint)
conn = self.connection_pool.get_connection()
self.connection = conn
return conn.retry.call_with_retry(
lambda: self._send_command_parse_response(
conn, command_name, *args, **options
),
lambda error: self._disconnect_reset_raise(conn, error),
lambda error: self._disconnect_reset_raise_on_watching(conn, error),
)
def pipeline_execute_command(self, *args, **options) -> "Pipeline":
@@ -1315,7 +1441,9 @@ class Pipeline(Redis):
self.command_stack.append((args, options))
return self
def _execute_transaction(self, connection, commands, raise_on_error) -> List:
def _execute_transaction(
self, connection: Connection, commands, raise_on_error
) -> List:
cmds = chain([(("MULTI",), {})], commands, [(("EXEC",), {})])
all_cmds = connection.pack_commands(
[args for args, options in cmds if EMPTY_RESPONSE not in options]
@@ -1376,6 +1504,8 @@ class Pipeline(Redis):
for r, cmd in zip(response, commands):
if not isinstance(r, Exception):
args, options = cmd
# Remove the "keys" entry; it is only needed for the cache.
options.pop("keys", None)
command_name = args[0]
if command_name in self.response_callbacks:
r = self.response_callbacks[command_name](r, **options)
@@ -1407,7 +1537,7 @@ class Pipeline(Redis):
def annotate_exception(self, exception, number, command):
cmd = " ".join(map(safe_str, command))
msg = (
f"Command # {number} ({cmd}) of pipeline "
f"Command # {number} ({truncate_text(cmd)}) of pipeline "
f"caused error: {exception.args[0]}"
)
exception.args = (msg,) + exception.args[1:]
@@ -1433,11 +1563,19 @@ class Pipeline(Redis):
if not exist:
s.sha = immediate("SCRIPT LOAD", s.script)
def _disconnect_raise_reset(self, conn: Redis, error: Exception) -> None:
def _disconnect_raise_on_watching(
self,
conn: AbstractConnection,
error: Exception,
) -> None:
"""
Close the connection, raise an exception if we were watching,
and raise an exception if TimeoutError is not part of retry_on_error,
or the error is not a TimeoutError
Close the connection and raise an exception if we were watching.
The supported exceptions are already checked in the
retry object, so we don't need to check them here.
After we disconnect the connection, it will try to reconnect and
do a health check as part of the send_command logic (at the connection level).
"""
conn.disconnect()
# if we were watching a variable, the watch is no longer valid
@@ -1445,17 +1583,10 @@ class Pipeline(Redis):
# indicates the user should retry this transaction.
if self.watching:
raise WatchError(
"A ConnectionError occurred on while watching one or more keys"
f"A {type(error).__name__} occurred while watching one or more keys"
)
# if TimeoutError is not part of retry_on_error, or the error
# is not a TimeoutError, raise it
if not (
TimeoutError in conn.retry_on_error and isinstance(error, TimeoutError)
):
self.reset()
raise error
def execute(self, raise_on_error=True):
def execute(self, raise_on_error: bool = True) -> List[Any]:
"""Execute all the commands in the current pipeline"""
stack = self.command_stack
if not stack and not self.watching:
@@ -1469,7 +1600,7 @@ class Pipeline(Redis):
conn = self.connection
if not conn:
conn = self.connection_pool.get_connection("MULTI", self.shard_hint)
conn = self.connection_pool.get_connection()
# assign to self.connection so reset() releases the connection
# back to the pool after we're done
self.connection = conn
@@ -1477,7 +1608,7 @@ class Pipeline(Redis):
try:
return conn.retry.call_with_retry(
lambda: execute(conn, stack, raise_on_error),
lambda error: self._disconnect_raise_reset(conn, error),
lambda error: self._disconnect_raise_on_watching(conn, error),
)
finally:
self.reset()
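
The watching handlers above turn a dropped connection during WATCH into a WatchError so that callers can retry the whole transaction. The usual retry loop looks roughly like this; the key name and update logic are illustrative:

from redis import Redis
from redis.exceptions import WatchError

client = Redis()
with client.pipeline() as pipe:
    while True:
        try:
            pipe.watch("balance")                 # immediate mode while WATCHing
            current = int(pipe.get("balance") or 0)
            pipe.multi()                          # switch to buffered, transactional mode
            pipe.set("balance", current + 10)
            pipe.execute()                        # EXEC; raises WatchError if "balance" changed
            break
        except WatchError:
            continue                              # the watched key changed; retry the transaction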