Major fixes and new features
All checks were successful
continuous-integration/drone/push Build is passing

This commit is contained in:
2025-09-25 15:51:48 +09:00
parent dd7349bb4c
commit ddce9f5125
5586 changed files with 1470941 additions and 0 deletions

View File

@@ -0,0 +1,5 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0

View File

@@ -0,0 +1,5 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0

View File

@@ -0,0 +1,136 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
# Declarations for WriteBuffer: a growable output buffer used to compose
# binary protocol messages.  Implementation lives in the matching .pyx.
cdef class WriteBuffer:
    cdef:
        # Preallocated small buffer
        bint _smallbuf_inuse
        char _smallbuf[_BUFFER_INITIAL_SIZE]

        # Active data pointer (either _smallbuf or a heap allocation)
        char *_buf

        # Allocated size
        ssize_t _size

        # Length of data in the buffer
        ssize_t _length

        # Number of memoryviews attached to the buffer
        int _view_count

        # True if start_message was used
        bint _message_mode

    cdef inline len(self):
        return self._length

    cdef inline write_len_prefixed_utf8(self, str s):
        return self.write_len_prefixed_bytes(s.encode('utf-8'))

    cdef inline _check_readonly(self)
    cdef inline _ensure_alloced(self, ssize_t extra_length)
    cdef _reallocate(self, ssize_t new_size)
    cdef inline reset(self)
    cdef inline start_message(self, char type)
    cdef inline end_message(self)
    cdef write_buffer(self, WriteBuffer buf)
    cdef write_byte(self, char b)
    cdef write_bytes(self, bytes data)
    cdef write_len_prefixed_buffer(self, WriteBuffer buf)
    cdef write_len_prefixed_bytes(self, bytes data)
    cdef write_bytestring(self, bytes string)
    cdef write_str(self, str string, str encoding)
    cdef write_frbuf(self, FRBuffer *buf)
    cdef write_cstr(self, const char *data, ssize_t len)
    cdef write_int16(self, int16_t i)
    cdef write_int32(self, int32_t i)
    cdef write_int64(self, int64_t i)
    cdef write_float(self, float f)
    cdef write_double(self, double d)

    @staticmethod
    cdef WriteBuffer new_message(char type)

    @staticmethod
    cdef WriteBuffer new()
# Function-pointer typedefs matching ReadBuffer's message-consumption
# methods, so other Cython modules can call them without Python overhead.
ctypedef const char * (*try_consume_message_method)(object, ssize_t*)
ctypedef int32_t (*take_message_type_method)(object, char) except -1
ctypedef int32_t (*take_message_method)(object) except -1
ctypedef char (*get_message_type_method)(object)
# Declarations for ReadBuffer: an input buffer over a deque of received
# bytes chunks, with a framed-message API layered on top.
cdef class ReadBuffer:
    cdef:
        # A deque of buffers (bytes objects)
        object _bufs
        object _bufs_append
        object _bufs_popleft

        # A pointer to the first buffer in `_bufs`
        bytes _buf0

        # A pointer to the previous first buffer
        # (used to prolong the life of _buf0 when using
        # methods like _try_read_bytes)
        bytes _buf0_prev

        # Number of buffers in `_bufs`
        int32_t _bufs_len

        # A read position in the first buffer in `_bufs`
        ssize_t _pos0

        # Length of the first buffer in `_bufs`
        ssize_t _len0

        # A total number of buffered bytes in ReadBuffer
        ssize_t _length

        # State of the message currently being parsed
        # (see take_message() in the implementation).
        char _current_message_type
        int32_t _current_message_len
        ssize_t _current_message_len_unread
        bint _current_message_ready

    cdef inline len(self):
        return self._length

    cdef inline char get_message_type(self):
        return self._current_message_type

    cdef inline int32_t get_message_length(self):
        return self._current_message_len

    cdef feed_data(self, data)
    cdef inline _ensure_first_buf(self)
    cdef _switch_to_next_buf(self)
    cdef inline char read_byte(self) except? -1
    cdef inline const char* _try_read_bytes(self, ssize_t nbytes)
    cdef inline _read_into(self, char *buf, ssize_t nbytes)
    cdef inline _read_and_discard(self, ssize_t nbytes)
    cdef bytes read_bytes(self, ssize_t nbytes)
    cdef bytes read_len_prefixed_bytes(self)
    cdef str read_len_prefixed_utf8(self)
    cdef read_uuid(self)
    cdef inline int64_t read_int64(self) except? -1
    cdef inline int32_t read_int32(self) except? -1
    cdef inline int16_t read_int16(self) except? -1
    cdef inline read_null_str(self)
    cdef int32_t take_message(self) except -1
    cdef inline int32_t take_message_type(self, char mtype) except -1
    cdef int32_t put_message(self) except -1
    cdef inline const char* try_consume_message(self, ssize_t* len)
    cdef bytes consume_message(self)
    cdef discard_message(self)
    cdef redirect_messages(self, WriteBuffer buf, char mtype, int stop_at=?)
    cdef bytearray consume_messages(self, char mtype)
    cdef finish_message(self)
    cdef inline _finish_message(self)

    @staticmethod
    cdef ReadBuffer new_message_parser(object data)

View File

@@ -0,0 +1,817 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
from libc.string cimport memcpy
import collections
# NOTE(review): intentionally shadows the builtin BufferError within this
# module so all buffer misuse raises this module-specific exception type.
class BufferError(Exception):
    """Raised on buffer overread/underread or misuse of the message API."""
    pass
@cython.no_gc_clear
@cython.final
@cython.freelist(_BUFFER_FREELIST_SIZE)
cdef class WriteBuffer:
    """Auto-growing output buffer for composing binary protocol messages.

    Data accumulates in a small preallocated on-object buffer and is
    spilled to a PyMem heap allocation once it no longer fits.  The
    buffer implements the Python buffer protocol, so it can be handed
    to a transport for writing without copying; while any memoryview
    is attached, the buffer is read-only.
    """

    def __cinit__(self):
        self._smallbuf_inuse = True
        self._buf = self._smallbuf
        self._size = _BUFFER_INITIAL_SIZE
        self._length = 0
        self._message_mode = 0

    def __dealloc__(self):
        if self._buf is not NULL and not self._smallbuf_inuse:
            cpython.PyMem_Free(self._buf)
            self._buf = NULL
            self._size = 0

        if self._view_count:
            raise BufferError(
                'Deallocating buffer with attached memoryviews')

    def __getbuffer__(self, Py_buffer *buffer, int flags):
        # Buffer-protocol export; mutation is blocked until released.
        self._view_count += 1

        cpython.PyBuffer_FillInfo(
            buffer, self, self._buf, self._length,
            1,  # read-only
            flags)

    def __releasebuffer__(self, Py_buffer *buffer):
        self._view_count -= 1

    cdef inline _check_readonly(self):
        # Writing is forbidden while memoryviews are attached.
        if self._view_count:
            raise BufferError('the buffer is in read-only mode')

    cdef inline _ensure_alloced(self, ssize_t extra_length):
        # Grow the allocation so *extra_length* more bytes fit.
        cdef ssize_t new_size = extra_length + self._length

        if new_size > self._size:
            self._reallocate(new_size)

    cdef _reallocate(self, ssize_t new_size):
        cdef char *new_buf

        if new_size < _BUFFER_MAX_GROW:
            new_size = _BUFFER_MAX_GROW
        else:
            # Add a little extra
            new_size += _BUFFER_INITIAL_SIZE

        if self._smallbuf_inuse:
            # First spill: copy the inline buffer to a heap allocation.
            new_buf = <char*>cpython.PyMem_Malloc(
                sizeof(char) * <size_t>new_size)
            if new_buf is NULL:
                # Leave the object in a consistent (empty) state on OOM.
                self._buf = NULL
                self._size = 0
                self._length = 0
                raise MemoryError
            memcpy(new_buf, self._buf, <size_t>self._size)
            self._size = new_size
            self._buf = new_buf
            self._smallbuf_inuse = False
        else:
            new_buf = <char*>cpython.PyMem_Realloc(
                <void*>self._buf, <size_t>new_size)
            if new_buf is NULL:
                cpython.PyMem_Free(self._buf)
                self._buf = NULL
                self._size = 0
                self._length = 0
                raise MemoryError
            self._buf = new_buf
            self._size = new_size

    cdef inline start_message(self, char type):
        # Begin a framed message: 1 type byte plus a 4-byte length
        # placeholder that end_message() fills in.
        if self._length != 0:
            raise BufferError(
                'cannot start_message for a non-empty buffer')
        self._ensure_alloced(5)
        self._message_mode = 1
        self._buf[0] = type
        self._length = 5

    cdef inline end_message(self):
        # "length-1" to exclude the message type byte
        cdef ssize_t mlen = self._length - 1

        self._check_readonly()
        if not self._message_mode:
            raise BufferError(
                'end_message can only be called with start_message')
        if self._length < 5:
            raise BufferError('end_message: buffer is too small')
        if mlen > _MAXINT32:
            raise BufferError('end_message: message is too large')

        hton.pack_int32(&self._buf[1], <int32_t>mlen)
        return self

    cdef inline reset(self):
        # Discard contents; the allocation is kept for reuse.
        self._length = 0
        self._message_mode = 0

    cdef write_buffer(self, WriteBuffer buf):
        self._check_readonly()

        if not buf._length:
            return

        self._ensure_alloced(buf._length)
        memcpy(self._buf + self._length,
               <void*>buf._buf,
               <size_t>buf._length)
        self._length += buf._length

    cdef write_byte(self, char b):
        self._check_readonly()

        self._ensure_alloced(1)
        self._buf[self._length] = b
        self._length += 1

    cdef write_bytes(self, bytes data):
        cdef char* buf
        cdef ssize_t len

        cpython.PyBytes_AsStringAndSize(data, &buf, &len)
        self.write_cstr(buf, len)

    cdef write_bytestring(self, bytes string):
        # Write *string* including a trailing NUL terminator.
        cdef char* buf
        cdef ssize_t len

        cpython.PyBytes_AsStringAndSize(string, &buf, &len)
        # PyBytes_AsStringAndSize returns a null-terminated buffer,
        # but the null byte is not counted in len. hence the + 1
        self.write_cstr(buf, len + 1)

    cdef write_str(self, str string, str encoding):
        self.write_bytestring(string.encode(encoding))

    cdef write_len_prefixed_buffer(self, WriteBuffer buf):
        # Write a length-prefixed (not NULL-terminated) bytes sequence.
        self.write_int32(<int32_t>buf.len())
        self.write_buffer(buf)

    cdef write_len_prefixed_bytes(self, bytes data):
        # Write a length-prefixed (not NULL-terminated) bytes sequence.
        cdef:
            char *buf
            ssize_t size

        cpython.PyBytes_AsStringAndSize(data, &buf, &size)
        if size > _MAXINT32:
            raise BufferError('string is too large')
        # `size` does not account for the NULL at the end.
        self.write_int32(<int32_t>size)
        self.write_cstr(buf, size)

    cdef write_frbuf(self, FRBuffer *buf):
        # Drain the remaining contents of an FRBuffer into this buffer.
        cdef:
            ssize_t buf_len = buf.len
        if buf_len > 0:
            self.write_cstr(frb_read_all(buf), buf_len)

    cdef write_cstr(self, const char *data, ssize_t len):
        self._check_readonly()
        self._ensure_alloced(len)

        memcpy(self._buf + self._length, <void*>data, <size_t>len)
        self._length += len

    cdef write_int16(self, int16_t i):
        # Integers are written in network (big-endian) byte order via hton.
        self._check_readonly()
        self._ensure_alloced(2)

        hton.pack_int16(&self._buf[self._length], i)
        self._length += 2

    cdef write_int32(self, int32_t i):
        self._check_readonly()
        self._ensure_alloced(4)

        hton.pack_int32(&self._buf[self._length], i)
        self._length += 4

    cdef write_int64(self, int64_t i):
        self._check_readonly()
        self._ensure_alloced(8)

        hton.pack_int64(&self._buf[self._length], i)
        self._length += 8

    cdef write_float(self, float f):
        self._check_readonly()
        self._ensure_alloced(4)

        hton.pack_float(&self._buf[self._length], f)
        self._length += 4

    cdef write_double(self, double d):
        self._check_readonly()
        self._ensure_alloced(8)

        hton.pack_double(&self._buf[self._length], d)
        self._length += 8

    @staticmethod
    cdef WriteBuffer new_message(char type):
        # Construct a buffer pre-initialized with a message header.
        cdef WriteBuffer buf
        buf = WriteBuffer.__new__(WriteBuffer)
        buf.start_message(type)
        return buf

    @staticmethod
    cdef WriteBuffer new():
        cdef WriteBuffer buf
        buf = WriteBuffer.__new__(WriteBuffer)
        return buf
@cython.no_gc_clear
@cython.final
@cython.freelist(_BUFFER_FREELIST_SIZE)
cdef class ReadBuffer:
    """Input buffer over a deque of received ``bytes`` chunks.

    Network data is appended as-is via feed_data(); readers consume it
    either directly out of the first chunk (the zero-copy fast path in
    _try_read_bytes) or by copying across chunk boundaries.  The
    ``*_message`` methods layer framed messages (1-byte type followed by
    an int32 length that includes itself) on top of the raw stream.
    """

    def __cinit__(self):
        self._bufs = collections.deque()
        # Bound methods cached to avoid attribute lookups on the hot path.
        self._bufs_append = self._bufs.append
        self._bufs_popleft = self._bufs.popleft
        self._bufs_len = 0
        self._buf0 = None
        self._buf0_prev = None
        self._pos0 = 0
        self._len0 = 0
        self._length = 0

        self._current_message_type = 0
        self._current_message_len = 0
        self._current_message_len_unread = 0
        self._current_message_ready = 0

    cdef feed_data(self, data):
        # Append a chunk of received data to the buffer.
        cdef:
            ssize_t dlen
            bytes data_bytes

        if not cpython.PyBytes_CheckExact(data):
            if cpythonx.PyByteArray_CheckExact(data):
                # ProactorEventLoop in Python 3.10+ seems to be sending
                # bytearray objects instead of bytes. Handle this here
                # to avoid duplicating this check in every data_received().
                data = bytes(data)
            else:
                raise BufferError(
                    'feed_data: a bytes or bytearray object expected')

        # Uncomment the below code to test code paths that
        # read single int/str/bytes sequences are split over
        # multiple received buffers.
        #
        # ll = 107
        # if len(data) > ll:
        #     self.feed_data(data[:ll])
        #     self.feed_data(data[ll:])
        #     return

        data_bytes = <bytes>data

        dlen = cpython.Py_SIZE(data_bytes)
        if dlen == 0:
            # EOF?
            return

        self._bufs_append(data_bytes)
        self._length += dlen

        if self._bufs_len == 0:
            # First buffer
            self._len0 = dlen
            self._buf0 = data_bytes

        self._bufs_len += 1

    cdef inline _ensure_first_buf(self):
        # Make sure _buf0 has unread data, advancing to the next chunk
        # if the current one is exhausted.
        if PG_DEBUG:
            if self._len0 == 0:
                raise BufferError('empty first buffer')
            if self._length == 0:
                raise BufferError('empty buffer')

        if self._pos0 == self._len0:
            self._switch_to_next_buf()

    cdef _switch_to_next_buf(self):
        # The first buffer is fully read, discard it
        self._bufs_popleft()
        self._bufs_len -= 1

        # Shouldn't fail, since we've checked that `_length >= 1`
        # in _ensure_first_buf()
        self._buf0_prev = self._buf0
        self._buf0 = <bytes>self._bufs[0]

        self._pos0 = 0
        self._len0 = len(self._buf0)

        if PG_DEBUG:
            if self._len0 < 1:
                raise BufferError(
                    'debug: second buffer of ReadBuffer is empty')

    cdef inline const char* _try_read_bytes(self, ssize_t nbytes):
        # Try to read *nbytes* from the first buffer.
        #
        # Returns pointer to data if there is at least *nbytes*
        # in the buffer, NULL otherwise.
        #
        # Important: caller must call _ensure_first_buf() prior
        # to calling try_read_bytes, and must not overread

        cdef:
            const char *result

        if PG_DEBUG:
            if nbytes > self._length:
                return NULL

        if self._current_message_ready:
            if self._current_message_len_unread < nbytes:
                return NULL

        if self._pos0 + nbytes <= self._len0:
            result = cpython.PyBytes_AS_STRING(self._buf0)
            result += self._pos0
            self._pos0 += nbytes
            self._length -= nbytes
            if self._current_message_ready:
                self._current_message_len_unread -= nbytes
            return result
        else:
            return NULL

    cdef inline _read_into(self, char *buf, ssize_t nbytes):
        # Copy *nbytes* into *buf*, crossing chunk boundaries as needed.
        # Caller guarantees the buffer holds at least *nbytes*.
        cdef:
            ssize_t nread
            char *buf0

        while True:
            buf0 = cpython.PyBytes_AS_STRING(self._buf0)

            if self._pos0 + nbytes > self._len0:
                # Consume the tail of the current chunk and continue.
                nread = self._len0 - self._pos0
                memcpy(buf, buf0 + self._pos0, <size_t>nread)
                self._pos0 = self._len0
                self._length -= nread
                nbytes -= nread
                buf += nread
                self._ensure_first_buf()

            else:
                memcpy(buf, buf0 + self._pos0, <size_t>nbytes)
                self._pos0 += nbytes
                self._length -= nbytes
                break

    cdef inline _read_and_discard(self, ssize_t nbytes):
        # Skip *nbytes* without copying them anywhere.
        cdef:
            ssize_t nread

        self._ensure_first_buf()
        while True:
            if self._pos0 + nbytes > self._len0:
                nread = self._len0 - self._pos0
                self._pos0 = self._len0
                self._length -= nread
                nbytes -= nread
                self._ensure_first_buf()

            else:
                self._pos0 += nbytes
                self._length -= nbytes
                break

    cdef bytes read_bytes(self, ssize_t nbytes):
        cdef:
            bytes result
            ssize_t nread
            const char *cbuf
            char *buf

        self._ensure_first_buf()
        # Fast path: the whole read fits in the first chunk.
        cbuf = self._try_read_bytes(nbytes)
        if cbuf != NULL:
            return cpython.PyBytes_FromStringAndSize(cbuf, nbytes)

        if nbytes > self._length:
            raise BufferError(
                'not enough data to read {} bytes'.format(nbytes))

        if self._current_message_ready:
            self._current_message_len_unread -= nbytes
            if self._current_message_len_unread < 0:
                raise BufferError('buffer overread')

        # Slow path: allocate the result and copy across chunks.
        result = cpython.PyBytes_FromStringAndSize(NULL, nbytes)
        buf = cpython.PyBytes_AS_STRING(result)
        self._read_into(buf, nbytes)
        return result

    cdef bytes read_len_prefixed_bytes(self):
        cdef int32_t size = self.read_int32()
        if size < 0:
            raise BufferError(
                'negative length for a len-prefixed bytes value')
        if size == 0:
            return b''
        return self.read_bytes(size)

    cdef str read_len_prefixed_utf8(self):
        cdef:
            int32_t size
            const char *cbuf

        size = self.read_int32()
        if size < 0:
            raise BufferError(
                'negative length for a len-prefixed bytes value')

        if size == 0:
            return ''

        self._ensure_first_buf()
        cbuf = self._try_read_bytes(size)
        if cbuf != NULL:
            # Decode straight out of the first chunk without copying.
            return cpython.PyUnicode_DecodeUTF8(cbuf, size, NULL)
        else:
            return self.read_bytes(size).decode('utf-8')

    cdef read_uuid(self):
        # Read a 16-byte UUID value.
        cdef:
            bytes mem
            const char *cbuf

        self._ensure_first_buf()
        cbuf = self._try_read_bytes(16)
        if cbuf != NULL:
            return pg_uuid_from_buf(cbuf)
        else:
            return pg_UUID(self.read_bytes(16))

    cdef inline char read_byte(self) except? -1:
        cdef const char *first_byte

        if PG_DEBUG:
            if not self._buf0:
                raise BufferError(
                    'debug: first buffer of ReadBuffer is empty')

        self._ensure_first_buf()
        first_byte = self._try_read_bytes(1)
        if first_byte is NULL:
            raise BufferError('not enough data to read one byte')

        return first_byte[0]

    cdef inline int64_t read_int64(self) except? -1:
        cdef:
            bytes mem
            const char *cbuf

        self._ensure_first_buf()
        cbuf = self._try_read_bytes(8)
        if cbuf != NULL:
            return hton.unpack_int64(cbuf)
        else:
            mem = self.read_bytes(8)
            return hton.unpack_int64(cpython.PyBytes_AS_STRING(mem))

    cdef inline int32_t read_int32(self) except? -1:
        cdef:
            bytes mem
            const char *cbuf

        self._ensure_first_buf()
        cbuf = self._try_read_bytes(4)
        if cbuf != NULL:
            return hton.unpack_int32(cbuf)
        else:
            mem = self.read_bytes(4)
            return hton.unpack_int32(cpython.PyBytes_AS_STRING(mem))

    cdef inline int16_t read_int16(self) except? -1:
        cdef:
            bytes mem
            const char *cbuf

        self._ensure_first_buf()
        cbuf = self._try_read_bytes(2)
        if cbuf != NULL:
            return hton.unpack_int16(cbuf)
        else:
            mem = self.read_bytes(2)
            return hton.unpack_int16(cpython.PyBytes_AS_STRING(mem))

    cdef inline read_null_str(self):
        # Read a NUL-terminated string (the NUL itself is consumed but
        # not included in the returned bytes).
        if not self._current_message_ready:
            raise BufferError(
                'read_null_str only works when the message guaranteed '
                'to be in the buffer')

        cdef:
            ssize_t pos
            ssize_t nread
            bytes result
            const char *buf
            const char *buf_start

        self._ensure_first_buf()

        # Fast path: scan for the NUL within the first chunk only.
        buf_start = cpython.PyBytes_AS_STRING(self._buf0)
        buf = buf_start + self._pos0
        while buf - buf_start < self._len0:
            if buf[0] == 0:
                pos = buf - buf_start
                nread = pos - self._pos0
                buf = self._try_read_bytes(nread + 1)
                if buf != NULL:
                    return cpython.PyBytes_FromStringAndSize(buf, nread)
                else:
                    break
            else:
                buf += 1

        # Slow path: the string spans chunks; accumulate chunk by chunk.
        result = b''
        while True:
            pos = self._buf0.find(b'\x00', self._pos0)
            if pos >= 0:
                result += self._buf0[self._pos0 : pos]
                nread = pos - self._pos0 + 1
                self._pos0 = pos + 1
                self._length -= nread

                self._current_message_len_unread -= nread
                if self._current_message_len_unread < 0:
                    raise BufferError(
                        'read_null_str: buffer overread')

                return result

            else:
                result += self._buf0[self._pos0:]
                nread = self._len0 - self._pos0
                self._pos0 = self._len0
                self._length -= nread

                self._current_message_len_unread -= nread
                if self._current_message_len_unread < 0:
                    raise BufferError(
                        'read_null_str: buffer overread')

                self._ensure_first_buf()

    cdef int32_t take_message(self) except -1:
        # Try to parse the next message header and check that the whole
        # message body is buffered.  Returns 1 on success, 0 if more
        # data is needed.  Partial header state is kept across calls.
        cdef:
            const char *cbuf

        if self._current_message_ready:
            return 1

        if self._current_message_type == 0:
            if self._length < 1:
                return 0
            self._ensure_first_buf()
            cbuf = self._try_read_bytes(1)
            if cbuf == NULL:
                raise BufferError(
                    'failed to read one byte on a non-empty buffer')
            self._current_message_type = cbuf[0]

        if self._current_message_len == 0:
            if self._length < 4:
                return 0

            self._ensure_first_buf()
            cbuf = self._try_read_bytes(4)
            if cbuf != NULL:
                self._current_message_len = hton.unpack_int32(cbuf)
            else:
                self._current_message_len = self.read_int32()

            # The length field includes its own 4 bytes.
            self._current_message_len_unread = self._current_message_len - 4

        if self._length < self._current_message_len_unread:
            return 0

        self._current_message_ready = 1
        return 1

    cdef inline int32_t take_message_type(self, char mtype) except -1:
        # Like take_message(), but only succeeds if the next message
        # is of type *mtype* (peeked without consuming).
        cdef const char *buf0

        if self._current_message_ready:
            return self._current_message_type == mtype
        elif self._length >= 1:
            self._ensure_first_buf()
            buf0 = cpython.PyBytes_AS_STRING(self._buf0)

            return buf0[self._pos0] == mtype and self.take_message()
        else:
            return 0

    cdef int32_t put_message(self) except -1:
        # Un-take the current message so take_message() returns it again.
        if not self._current_message_ready:
            raise BufferError(
                'cannot put message: no message taken')
        self._current_message_ready = False
        return 0

    cdef inline const char* try_consume_message(self, ssize_t* len):
        # Zero-copy consume of the current message body if it is fully
        # contained in the first chunk; NULL otherwise.
        cdef:
            ssize_t buf_len
            const char *buf

        if not self._current_message_ready:
            return NULL

        self._ensure_first_buf()
        buf_len = self._current_message_len_unread
        buf = self._try_read_bytes(buf_len)
        if buf != NULL:
            len[0] = buf_len
            self._finish_message()
        return buf

    cdef discard_message(self):
        if not self._current_message_ready:
            raise BufferError('no message to discard')
        if self._current_message_len_unread > 0:
            self._read_and_discard(self._current_message_len_unread)
            self._current_message_len_unread = 0
        self._finish_message()

    cdef bytes consume_message(self):
        if not self._current_message_ready:
            raise BufferError('no message to consume')
        if self._current_message_len_unread > 0:
            mem = self.read_bytes(self._current_message_len_unread)
        else:
            mem = b''
        self._finish_message()
        return mem

    cdef redirect_messages(self, WriteBuffer buf, char mtype,
                           int stop_at=0):
        # Copy consecutive full messages of type *mtype* (headers
        # included) into *buf*; optionally stop once *buf* reaches
        # *stop_at* bytes.
        if not self._current_message_ready:
            raise BufferError(
                'consume_full_messages called on a buffer without a '
                'complete first message')
        if mtype != self._current_message_type:
            raise BufferError(
                'consume_full_messages called with a wrong mtype')
        if self._current_message_len_unread != self._current_message_len - 4:
            raise BufferError(
                'consume_full_messages called on a partially read message')

        cdef:
            const char* cbuf
            ssize_t cbuf_len
            int32_t msg_len
            ssize_t new_pos0
            ssize_t pos_delta
            int32_t done

        while True:
            # Re-emit the header of the current (already taken) message.
            buf.write_byte(mtype)
            buf.write_int32(self._current_message_len)

            cbuf = self.try_consume_message(&cbuf_len)
            if cbuf != NULL:
                buf.write_cstr(cbuf, cbuf_len)
            else:
                buf.write_bytes(self.consume_message())

            if self._length > 0:
                self._ensure_first_buf()
            else:
                return

            if stop_at and buf._length >= stop_at:
                return

            # Fast path: exhaust buf0 as efficiently as possible.
            if self._pos0 + 5 <= self._len0:
                cbuf = cpython.PyBytes_AS_STRING(self._buf0)
                new_pos0 = self._pos0
                cbuf_len = self._len0

                done = 0
                # Scan the first buffer and find the position of the
                # end of the last "mtype" message.
                while new_pos0 + 5 <= cbuf_len:
                    if (cbuf + new_pos0)[0] != mtype:
                        done = 1
                        break
                    if (stop_at and
                            (buf._length + new_pos0 - self._pos0) > stop_at):
                        done = 1
                        break
                    # +1 accounts for the message type byte, which the
                    # length field does not include.
                    msg_len = hton.unpack_int32(cbuf + new_pos0 + 1) + 1
                    if new_pos0 + msg_len > cbuf_len:
                        break
                    new_pos0 += msg_len

                if new_pos0 != self._pos0:
                    assert self._pos0 < new_pos0 <= self._len0
                    pos_delta = new_pos0 - self._pos0
                    buf.write_cstr(
                        cbuf + self._pos0,
                        pos_delta)

                    self._pos0 = new_pos0
                    self._length -= pos_delta

                    assert self._length >= 0

                if done:
                    # The next message is of a different type.
                    return

            # Back to slow path.
            if not self.take_message_type(mtype):
                return

    cdef bytearray consume_messages(self, char mtype):
        """Consume consecutive messages of the same type."""
        cdef:
            char *buf
            ssize_t nbytes
            ssize_t total_bytes = 0
            bytearray result

        if not self.take_message_type(mtype):
            return None

        # consume_messages is a volume-oriented method, so
        # we assume that the remainder of the buffer will contain
        # messages of the requested type.
        result = cpythonx.PyByteArray_FromStringAndSize(NULL, self._length)
        buf = cpythonx.PyByteArray_AsString(result)

        while self.take_message_type(mtype):
            self._ensure_first_buf()
            nbytes = self._current_message_len_unread
            self._read_into(buf, nbytes)
            buf += nbytes
            total_bytes += nbytes
            self._finish_message()

        # Clamp the result to an actual size read.
        cpythonx.PyByteArray_Resize(result, total_bytes)

        return result

    cdef finish_message(self):
        if self._current_message_type == 0 or not self._current_message_ready:
            # The message has already been finished (e.g by consume_message()),
            # or has been put back by put_message().
            return

        if self._current_message_len_unread:
            if PG_DEBUG:
                mtype = chr(self._current_message_type)

            discarded = self.consume_message()

            if PG_DEBUG:
                print('!!! discarding message {!r} unread data: {!r}'.format(
                    mtype,
                    discarded))

        self._finish_message()

    cdef inline _finish_message(self):
        # Reset per-message parsing state.
        self._current_message_type = 0
        self._current_message_len = 0
        self._current_message_ready = 0
        self._current_message_len_unread = 0

    @staticmethod
    cdef ReadBuffer new_message_parser(object data):
        # Wrap *data* as a single ready "message" so the read_* methods
        # can be used to parse it.
        cdef ReadBuffer buf

        buf = ReadBuffer.__new__(ReadBuffer)
        buf.feed_data(data)

        buf._current_message_ready = 1
        buf._current_message_len_unread = buf._len0

        return buf

View File

@@ -0,0 +1,157 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
# Declarations for CodecContext: the per-connection configuration object
# consulted by codec functions (text codec, JSON encoder/decoder).
cdef class CodecContext:
    cpdef get_text_codec(self)
    cdef is_encoding_utf8(self)
    cpdef get_json_decoder(self)
    cdef is_decoding_json(self)
    cpdef get_json_encoder(self)
    cdef is_encoding_json(self)
# Common signatures for all scalar codecs: an encoder appends the wire
# form of *obj* to *buf*; a decoder builds a Python object from *buf*.
ctypedef object (*encode_func)(CodecContext settings,
                               WriteBuffer buf,
                               object obj)
ctypedef object (*decode_func)(CodecContext settings,
                               FRBuffer *buf)

# Datetime
cdef date_encode(CodecContext settings, WriteBuffer buf, obj)
cdef date_decode(CodecContext settings, FRBuffer * buf)
cdef date_encode_tuple(CodecContext settings, WriteBuffer buf, obj)
cdef date_decode_tuple(CodecContext settings, FRBuffer * buf)
cdef timestamp_encode(CodecContext settings, WriteBuffer buf, obj)
cdef timestamp_decode(CodecContext settings, FRBuffer * buf)
cdef timestamp_encode_tuple(CodecContext settings, WriteBuffer buf, obj)
cdef timestamp_decode_tuple(CodecContext settings, FRBuffer * buf)
cdef timestamptz_encode(CodecContext settings, WriteBuffer buf, obj)
cdef timestamptz_decode(CodecContext settings, FRBuffer * buf)
cdef time_encode(CodecContext settings, WriteBuffer buf, obj)
cdef time_decode(CodecContext settings, FRBuffer * buf)
cdef time_encode_tuple(CodecContext settings, WriteBuffer buf, obj)
cdef time_decode_tuple(CodecContext settings, FRBuffer * buf)
cdef timetz_encode(CodecContext settings, WriteBuffer buf, obj)
cdef timetz_decode(CodecContext settings, FRBuffer * buf)
cdef timetz_encode_tuple(CodecContext settings, WriteBuffer buf, obj)
cdef timetz_decode_tuple(CodecContext settings, FRBuffer * buf)
cdef interval_encode(CodecContext settings, WriteBuffer buf, obj)
cdef interval_decode(CodecContext settings, FRBuffer * buf)
cdef interval_encode_tuple(CodecContext settings, WriteBuffer buf, tuple obj)
cdef interval_decode_tuple(CodecContext settings, FRBuffer * buf)

# Bits
cdef bits_encode(CodecContext settings, WriteBuffer wbuf, obj)
cdef bits_decode(CodecContext settings, FRBuffer * buf)

# Bools
cdef bool_encode(CodecContext settings, WriteBuffer buf, obj)
cdef bool_decode(CodecContext settings, FRBuffer * buf)

# Geometry
cdef box_encode(CodecContext settings, WriteBuffer wbuf, obj)
cdef box_decode(CodecContext settings, FRBuffer * buf)
cdef line_encode(CodecContext settings, WriteBuffer wbuf, obj)
cdef line_decode(CodecContext settings, FRBuffer * buf)
cdef lseg_encode(CodecContext settings, WriteBuffer wbuf, obj)
cdef lseg_decode(CodecContext settings, FRBuffer * buf)
cdef point_encode(CodecContext settings, WriteBuffer wbuf, obj)
cdef point_decode(CodecContext settings, FRBuffer * buf)
cdef path_encode(CodecContext settings, WriteBuffer wbuf, obj)
cdef path_decode(CodecContext settings, FRBuffer * buf)
cdef poly_encode(CodecContext settings, WriteBuffer wbuf, obj)
cdef poly_decode(CodecContext settings, FRBuffer * buf)
cdef circle_encode(CodecContext settings, WriteBuffer wbuf, obj)
cdef circle_decode(CodecContext settings, FRBuffer * buf)

# Hstore
cdef hstore_encode(CodecContext settings, WriteBuffer buf, obj)
cdef hstore_decode(CodecContext settings, FRBuffer * buf)

# Ints
cdef int2_encode(CodecContext settings, WriteBuffer buf, obj)
cdef int2_decode(CodecContext settings, FRBuffer * buf)
cdef int4_encode(CodecContext settings, WriteBuffer buf, obj)
cdef int4_decode(CodecContext settings, FRBuffer * buf)
cdef uint4_encode(CodecContext settings, WriteBuffer buf, obj)
cdef uint4_decode(CodecContext settings, FRBuffer * buf)
cdef int8_encode(CodecContext settings, WriteBuffer buf, obj)
cdef int8_decode(CodecContext settings, FRBuffer * buf)
cdef uint8_encode(CodecContext settings, WriteBuffer buf, obj)
cdef uint8_decode(CodecContext settings, FRBuffer * buf)

# Floats
cdef float4_encode(CodecContext settings, WriteBuffer buf, obj)
cdef float4_decode(CodecContext settings, FRBuffer * buf)
cdef float8_encode(CodecContext settings, WriteBuffer buf, obj)
cdef float8_decode(CodecContext settings, FRBuffer * buf)

# JSON
cdef jsonb_encode(CodecContext settings, WriteBuffer buf, obj)
cdef jsonb_decode(CodecContext settings, FRBuffer * buf)

# JSON path
cdef jsonpath_encode(CodecContext settings, WriteBuffer buf, obj)
cdef jsonpath_decode(CodecContext settings, FRBuffer * buf)

# Text
cdef as_pg_string_and_size(
    CodecContext settings, obj, char **cstr, ssize_t *size)
cdef text_encode(CodecContext settings, WriteBuffer buf, obj)
cdef text_decode(CodecContext settings, FRBuffer * buf)

# Bytea
cdef bytea_encode(CodecContext settings, WriteBuffer wbuf, obj)
cdef bytea_decode(CodecContext settings, FRBuffer * buf)

# UUID
cdef uuid_encode(CodecContext settings, WriteBuffer wbuf, obj)
cdef uuid_decode(CodecContext settings, FRBuffer * buf)

# Numeric
cdef numeric_encode_text(CodecContext settings, WriteBuffer buf, obj)
cdef numeric_decode_text(CodecContext settings, FRBuffer * buf)
cdef numeric_encode_binary(CodecContext settings, WriteBuffer buf, obj)
cdef numeric_decode_binary(CodecContext settings, FRBuffer * buf)
cdef numeric_decode_binary_ex(CodecContext settings, FRBuffer * buf,
                              bint trail_fract_zero)

# Void
cdef void_encode(CodecContext settings, WriteBuffer buf, obj)
cdef void_decode(CodecContext settings, FRBuffer * buf)

# tid
cdef tid_encode(CodecContext settings, WriteBuffer buf, obj)
cdef tid_decode(CodecContext settings, FRBuffer * buf)

# Network
cdef cidr_encode(CodecContext settings, WriteBuffer buf, obj)
cdef cidr_decode(CodecContext settings, FRBuffer * buf)
cdef inet_encode(CodecContext settings, WriteBuffer buf, obj)
cdef inet_decode(CodecContext settings, FRBuffer * buf)

# pg_snapshot
cdef pg_snapshot_encode(CodecContext settings, WriteBuffer buf, obj)
cdef pg_snapshot_decode(CodecContext settings, FRBuffer * buf)

View File

@@ -0,0 +1,47 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
cdef bits_encode(CodecContext settings, WriteBuffer wbuf, obj):
    # Encode a BIT/VARBIT value from bytes, a BitString, or any object
    # exporting the buffer protocol.  Wire format: int32 payload length,
    # int32 bit count, then the bit data bytes.
    cdef:
        Py_buffer pybuf
        bint pybuf_used = False
        char *buf
        ssize_t len
        ssize_t bitlen

    if cpython.PyBytes_CheckExact(obj):
        buf = cpython.PyBytes_AS_STRING(obj)
        len = cpython.Py_SIZE(obj)
        bitlen = len * 8
    elif isinstance(obj, pgproto_types.BitString):
        cpython.PyBytes_AsStringAndSize(obj.bytes, &buf, &len)
        # BitString tracks its exact bit length, which may be
        # shorter than len * 8.
        bitlen = obj.__len__()
    else:
        cpython.PyObject_GetBuffer(obj, &pybuf, cpython.PyBUF_SIMPLE)
        pybuf_used = True
        buf = <char*>pybuf.buf
        len = pybuf.len
        bitlen = len * 8

    try:
        if bitlen > _MAXINT32:
            raise ValueError('bit value too long')
        wbuf.write_int32(4 + <int32_t>len)
        wbuf.write_int32(<int32_t>bitlen)
        wbuf.write_cstr(buf, len)
    finally:
        # Release the exported buffer even if writing raised.
        if pybuf_used:
            cpython.PyBuffer_Release(&pybuf)
cdef bits_decode(CodecContext settings, FRBuffer *buf):
    # Decode a BIT/VARBIT value: int32 bit count followed by the data.
    cdef:
        int32_t bitlen = hton.unpack_int32(frb_read(buf, 4))
        ssize_t buf_len = buf.len

    bytes_ = cpython.PyBytes_FromStringAndSize(frb_read_all(buf), buf_len)
    return pgproto_types.BitString.frombytes(bytes_, bitlen)

View File

@@ -0,0 +1,34 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
cdef bytea_encode(CodecContext settings, WriteBuffer wbuf, obj):
    # Encode a BYTEA value from bytes or any buffer-protocol object:
    # int32 length followed by the raw data.
    cdef:
        Py_buffer pybuf
        bint pybuf_used = False
        char *buf
        ssize_t len

    if cpython.PyBytes_CheckExact(obj):
        # Fast path: borrow the internal bytes pointer, no buffer export.
        buf = cpython.PyBytes_AS_STRING(obj)
        len = cpython.Py_SIZE(obj)
    else:
        cpython.PyObject_GetBuffer(obj, &pybuf, cpython.PyBUF_SIMPLE)
        pybuf_used = True
        buf = <char*>pybuf.buf
        len = pybuf.len

    try:
        wbuf.write_int32(<int32_t>len)
        wbuf.write_cstr(buf, len)
    finally:
        # Release the exported buffer even if writing raised.
        if pybuf_used:
            cpython.PyBuffer_Release(&pybuf)
cdef bytea_decode(CodecContext settings, FRBuffer *buf):
    # Decode a BYTEA value: the remaining buffer contents as bytes.
    cdef ssize_t buf_len = buf.len
    return cpython.PyBytes_FromStringAndSize(frb_read_all(buf), buf_len)

View File

@@ -0,0 +1,26 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
cdef class CodecContext:
    """Abstract base for codec configuration.

    Subclasses override the text/JSON accessors; the base class only
    provides conservative defaults for the JSON feature flags.
    """

    cpdef get_text_codec(self):
        raise NotImplementedError

    cdef is_encoding_utf8(self):
        raise NotImplementedError

    cpdef get_json_decoder(self):
        raise NotImplementedError

    cdef is_decoding_json(self):
        # Default: JSON values are not decoded by the codec layer.
        return False

    cpdef get_json_encoder(self):
        raise NotImplementedError

    cdef is_encoding_json(self):
        # Default: values are not JSON-encoded by the codec layer.
        return False

View File

@@ -0,0 +1,423 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
cimport cpython.datetime
import datetime
cpython.datetime.import_datetime()
# Module-level helpers and precomputed constants for the date/time
# codecs.  PostgreSQL's binary date/time epoch is 2000-01-01.
utc = datetime.timezone.utc
date_from_ordinal = datetime.date.fromordinal
timedelta = datetime.timedelta
pg_epoch_datetime = datetime.datetime(2000, 1, 1)
cdef int32_t pg_epoch_datetime_ts = \
    <int32_t>cpython.PyLong_AsLong(int(pg_epoch_datetime.timestamp()))
pg_epoch_datetime_utc = datetime.datetime(2000, 1, 1, tzinfo=utc)
cdef int32_t pg_epoch_datetime_utc_ts = \
    <int32_t>cpython.PyLong_AsLong(int(pg_epoch_datetime_utc.timestamp()))
pg_epoch_date = datetime.date(2000, 1, 1)
# Proleptic-Gregorian ordinal of the PostgreSQL date epoch.
cdef int32_t pg_date_offset_ord = \
    <int32_t>cpython.PyLong_AsLong(pg_epoch_date.toordinal())
# Binary representations of infinity for datetimes.
cdef int64_t pg_time64_infinity = 0x7fffffffffffffff
cdef int64_t pg_time64_negative_infinity = <int64_t>0x8000000000000000
cdef int32_t pg_date_infinity = 0x7fffffff
cdef int32_t pg_date_negative_infinity = <int32_t>0x80000000
# Python-side sentinels used to represent PostgreSQL's +/-infinity:
# the max/min representable datetime/date values.
infinity_datetime = datetime.datetime(
    datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999)
cdef int32_t infinity_datetime_ord = <int32_t>cpython.PyLong_AsLong(
    infinity_datetime.toordinal())
cdef int64_t infinity_datetime_ts = 252455615999999999
negative_infinity_datetime = datetime.datetime(
    datetime.MINYEAR, 1, 1, 0, 0, 0, 0)
cdef int32_t negative_infinity_datetime_ord = <int32_t>cpython.PyLong_AsLong(
    negative_infinity_datetime.toordinal())
cdef int64_t negative_infinity_datetime_ts = -63082281600000000
infinity_date = datetime.date(datetime.MAXYEAR, 12, 31)
cdef int32_t infinity_date_ord = <int32_t>cpython.PyLong_AsLong(
    infinity_date.toordinal())
negative_infinity_date = datetime.date(datetime.MINYEAR, 1, 1)
cdef int32_t negative_infinity_date_ord = <int32_t>cpython.PyLong_AsLong(
    negative_infinity_date.toordinal())
cdef inline _local_timezone():
    # Determine the system-local timezone as a fixed-offset tzinfo.
    now = datetime.datetime.now(datetime.timezone.utc).astimezone()
    return datetime.timezone(now.utcoffset())
cdef inline _encode_time(WriteBuffer buf, int64_t seconds,
                         int32_t microseconds):
    # XXX: add support for double timestamps
    # int64 timestamps,
    # Write a time quantity as a single int64 microsecond count,
    # mapping the Python min/max datetime sentinel values to
    # PostgreSQL's special +/-infinity bit patterns.
    cdef int64_t ts = seconds * 1000000 + microseconds
    if ts == infinity_datetime_ts:
        buf.write_int64(pg_time64_infinity)
    elif ts == negative_infinity_datetime_ts:
        buf.write_int64(pg_time64_negative_infinity)
    else:
        buf.write_int64(ts)
cdef inline int32_t _decode_time(FRBuffer *buf, int64_t *seconds,
                                 int32_t *microseconds):
    # Read an int64 microsecond count into *seconds / *microseconds.
    # Returns 1 for +infinity, -1 for -infinity, 0 otherwise (only in
    # that case are the output arguments populated).
    cdef int64_t ts = hton.unpack_int64(frb_read(buf, 8))
    if ts == pg_time64_infinity:
        return 1
    elif ts == pg_time64_negative_infinity:
        return -1
    else:
        seconds[0] = ts // 1000000
        microseconds[0] = <int32_t>(ts % 1000000)
        return 0
cdef date_encode(CodecContext settings, WriteBuffer buf, obj):
    # Encode a datetime.date as a PostgreSQL `date`: the day count
    # relative to the 2000-01-01 epoch, with the min/max date
    # sentinels mapped to the -/+infinity bit patterns.
    cdef:
        int32_t ordinal = <int32_t>cpython.PyLong_AsLong(obj.toordinal())
        int32_t pg_ordinal
    if ordinal == infinity_date_ord:
        pg_ordinal = pg_date_infinity
    elif ordinal == negative_infinity_date_ord:
        pg_ordinal = pg_date_negative_infinity
    else:
        pg_ordinal = ordinal - pg_date_offset_ord
    buf.write_int32(4)
    buf.write_int32(pg_ordinal)
cdef date_encode_tuple(CodecContext settings, WriteBuffer buf, obj):
    # Encode a raw 1-tuple (pg_ordinal,) as a `date` value without
    # any epoch translation or infinity mapping.
    cdef:
        int32_t pg_ordinal
    if len(obj) != 1:
        raise ValueError(
            'date tuple encoder: expecting 1 element '
            'in tuple, got {}'.format(len(obj)))
    pg_ordinal = obj[0]
    buf.write_int32(4)
    buf.write_int32(pg_ordinal)
cdef date_decode(CodecContext settings, FRBuffer *buf):
    # Decode a `date` value, mapping the infinity bit patterns to the
    # min/max datetime.date sentinel values.
    cdef int32_t pg_ordinal = hton.unpack_int32(frb_read(buf, 4))
    if pg_ordinal == pg_date_infinity:
        return infinity_date
    elif pg_ordinal == pg_date_negative_infinity:
        return negative_infinity_date
    else:
        return date_from_ordinal(pg_ordinal + pg_date_offset_ord)
cdef date_decode_tuple(CodecContext settings, FRBuffer *buf):
    # Return the raw date value (days relative to 2000-01-01) as a
    # 1-tuple, without any conversion.
    cdef int32_t day_offset = hton.unpack_int32(frb_read(buf, 4))
    return (day_offset,)
cdef timestamp_encode(CodecContext settings, WriteBuffer buf, obj):
    # Encode a naive datetime (or a date, promoted to midnight) as a
    # `timestamp`: microseconds relative to the 2000-01-01 epoch.
    if not cpython.datetime.PyDateTime_Check(obj):
        if cpython.datetime.PyDate_Check(obj):
            obj = datetime.datetime(obj.year, obj.month, obj.day)
        else:
            raise TypeError(
                'expected a datetime.date or datetime.datetime instance, '
                'got {!r}'.format(type(obj).__name__)
            )
    delta = obj - pg_epoch_datetime
    cdef:
        int64_t seconds = cpython.PyLong_AsLongLong(delta.days) * 86400 + \
            cpython.PyLong_AsLong(delta.seconds)
        int32_t microseconds = <int32_t>cpython.PyLong_AsLong(
            delta.microseconds)
    buf.write_int32(8)
    _encode_time(buf, seconds, microseconds)
cdef timestamp_encode_tuple(CodecContext settings, WriteBuffer buf, obj):
    # Encode a raw 1-tuple (microseconds,) as a `timestamp` without
    # epoch translation or infinity mapping.
    cdef:
        int64_t microseconds
    if len(obj) != 1:
        raise ValueError(
            'timestamp tuple encoder: expecting 1 element '
            'in tuple, got {}'.format(len(obj)))
    microseconds = obj[0]
    buf.write_int32(8)
    buf.write_int64(microseconds)
cdef timestamp_decode(CodecContext settings, FRBuffer *buf):
    # Decode a `timestamp` into a naive datetime, mapping the
    # infinity bit patterns to the min/max datetime sentinels.
    cdef:
        int64_t seconds = 0
        int32_t microseconds = 0
        int32_t inf = _decode_time(buf, &seconds, &microseconds)
    if inf > 0:
        # positive infinity
        return infinity_datetime
    elif inf < 0:
        # negative infinity
        return negative_infinity_datetime
    else:
        return pg_epoch_datetime.__add__(
            timedelta(0, seconds, microseconds))
cdef timestamp_decode_tuple(CodecContext settings, FRBuffer *buf):
    # Return the raw timestamp value (microseconds relative to the
    # 2000-01-01 epoch) as a 1-tuple, without any conversion.
    cdef int64_t microseconds = hton.unpack_int64(frb_read(buf, 8))
    return (microseconds,)
cdef timestamptz_encode(CodecContext settings, WriteBuffer buf, obj):
    # Encode an aware datetime (or a date, promoted to local midnight)
    # as a `timestamptz`: normalized to UTC and written as microseconds
    # relative to the 2000-01-01 UTC epoch.
    if not cpython.datetime.PyDateTime_Check(obj):
        if cpython.datetime.PyDate_Check(obj):
            obj = datetime.datetime(obj.year, obj.month, obj.day,
                                    tzinfo=_local_timezone())
        else:
            raise TypeError(
                'expected a datetime.date or datetime.datetime instance, '
                'got {!r}'.format(type(obj).__name__)
            )
    buf.write_int32(8)
    # Handle the infinity sentinels before the timezone conversion
    # and epoch arithmetic below.
    if obj == infinity_datetime:
        buf.write_int64(pg_time64_infinity)
        return
    elif obj == negative_infinity_datetime:
        buf.write_int64(pg_time64_negative_infinity)
        return
    utc_dt = obj.astimezone(utc)
    delta = utc_dt - pg_epoch_datetime_utc
    cdef:
        int64_t seconds = cpython.PyLong_AsLongLong(delta.days) * 86400 + \
            cpython.PyLong_AsLong(delta.seconds)
        int32_t microseconds = <int32_t>cpython.PyLong_AsLong(
            delta.microseconds)
    _encode_time(buf, seconds, microseconds)
cdef timestamptz_decode(CodecContext settings, FRBuffer *buf):
    # Decode a `timestamptz` into a UTC-aware datetime, mapping the
    # infinity bit patterns to the (naive) min/max datetime sentinels.
    cdef:
        int64_t seconds = 0
        int32_t microseconds = 0
        int32_t inf = _decode_time(buf, &seconds, &microseconds)
    if inf > 0:
        # positive infinity
        return infinity_datetime
    elif inf < 0:
        # negative infinity
        return negative_infinity_datetime
    else:
        return pg_epoch_datetime_utc.__add__(
            timedelta(0, seconds, microseconds))
cdef time_encode(CodecContext settings, WriteBuffer buf, obj):
    # Encode a datetime.time as a `time` value: an int64 count of
    # microseconds since midnight.
    cdef:
        int64_t seconds = cpython.PyLong_AsLong(obj.hour) * 3600 + \
            cpython.PyLong_AsLong(obj.minute) * 60 + \
            cpython.PyLong_AsLong(obj.second)
        int32_t microseconds = <int32_t>cpython.PyLong_AsLong(obj.microsecond)
    buf.write_int32(8)
    _encode_time(buf, seconds, microseconds)
cdef time_encode_tuple(CodecContext settings, WriteBuffer buf, obj):
    # Encode a raw 1-tuple (microseconds,) as a `time` value without
    # any conversion.
    cdef:
        int64_t microseconds
    if len(obj) != 1:
        raise ValueError(
            'time tuple encoder: expecting 1 element '
            'in tuple, got {}'.format(len(obj)))
    microseconds = obj[0]
    buf.write_int32(8)
    buf.write_int64(microseconds)
cdef time_decode(CodecContext settings, FRBuffer *buf):
    # Decode a `time` value (microseconds since midnight) into a
    # datetime.time.
    cdef:
        int64_t seconds = 0
        int32_t microseconds = 0
    _decode_time(buf, &seconds, &microseconds)
    # Break the total second count down into hour/minute/second
    # fields.  (`min` here is a local C variable, not the builtin.)
    cdef:
        int64_t minutes = <int64_t>(seconds / 60)
        int64_t sec = seconds % 60
        int64_t hours = <int64_t>(minutes / 60)
        int64_t min = minutes % 60
    return datetime.time(hours, min, sec, microseconds)
cdef time_decode_tuple(CodecContext settings, FRBuffer *buf):
    # Return the raw time value (microseconds since midnight) as a
    # 1-tuple, without any conversion.
    cdef int64_t microseconds = hton.unpack_int64(frb_read(buf, 8))
    return (microseconds,)
cdef timetz_encode(CodecContext settings, WriteBuffer buf, obj):
    # Encode an aware datetime.time as a `timetz` value (12 bytes):
    # int64 microseconds since midnight followed by the int32 zone
    # offset in seconds.
    offset = obj.tzinfo.utcoffset(None)
    cdef:
        int32_t offset_sec = \
            <int32_t>cpython.PyLong_AsLong(offset.days) * 24 * 60 * 60 + \
            <int32_t>cpython.PyLong_AsLong(offset.seconds)
        int64_t seconds = cpython.PyLong_AsLong(obj.hour) * 3600 + \
            cpython.PyLong_AsLong(obj.minute) * 60 + \
            cpython.PyLong_AsLong(obj.second)
        int32_t microseconds = <int32_t>cpython.PyLong_AsLong(obj.microsecond)
    buf.write_int32(12)
    _encode_time(buf, seconds, microseconds)
    # In Python utcoffset() is the difference between the local time
    # and the UTC, whereas in PostgreSQL it's the opposite,
    # so we need to flip the sign.
    buf.write_int32(-offset_sec)
cdef timetz_encode_tuple(CodecContext settings, WriteBuffer buf, obj):
    # Encode a raw (microseconds, tz_offset_seconds) pair as a
    # `timetz` value (12 bytes) without any conversion.
    #
    # Fixed the error message: it previously read
    # "time tuple encoder: expecting 2 elements2", i.e. a stray "2"
    # typo and the wrong codec name (every sibling encoder names its
    # own type: "date tuple encoder", "interval tuple encoder", ...).
    cdef:
        int64_t microseconds
        int32_t offset_sec
    if len(obj) != 2:
        raise ValueError(
            'timetz tuple encoder: expecting 2 elements '
            'in tuple, got {}'.format(len(obj)))
    microseconds = obj[0]
    offset_sec = obj[1]
    buf.write_int32(12)
    buf.write_int64(microseconds)
    buf.write_int32(offset_sec)
cdef timetz_decode(CodecContext settings, FRBuffer *buf):
    # Decode a `timetz` value: a plain time followed by the zone
    # offset in seconds, which is converted to whole minutes here.
    time = time_decode(settings, buf)
    cdef int32_t offset = <int32_t>(hton.unpack_int32(frb_read(buf, 4)) / 60)
    # See the comment in the `timetz_encode` method.
    return time.replace(tzinfo=datetime.timezone(timedelta(minutes=-offset)))
cdef timetz_decode_tuple(CodecContext settings, FRBuffer *buf):
    # Return the raw (microseconds, tz_offset_seconds) pair without
    # any conversion.
    cdef:
        int64_t us = hton.unpack_int64(frb_read(buf, 8))
        int32_t tz_offset = hton.unpack_int32(frb_read(buf, 4))
    return (us, tz_offset)
cdef interval_encode(CodecContext settings, WriteBuffer buf, obj):
    # Encode a datetime.timedelta as an `interval` (16 bytes:
    # int64 microseconds, int32 days, int32 months).  timedelta has
    # no calendar-month component, so months is always written as 0.
    cdef:
        int32_t days = <int32_t>cpython.PyLong_AsLong(obj.days)
        int64_t seconds = cpython.PyLong_AsLongLong(obj.seconds)
        int32_t microseconds = <int32_t>cpython.PyLong_AsLong(obj.microseconds)
    buf.write_int32(16)
    _encode_time(buf, seconds, microseconds)
    buf.write_int32(days)
    buf.write_int32(0) # Months
cdef interval_encode_tuple(CodecContext settings, WriteBuffer buf,
                           tuple obj):
    # Encode a raw (months, days, microseconds) triple as an
    # `interval` value without any conversion.
    cdef:
        int32_t months
        int32_t days
        int64_t microseconds
    if len(obj) != 3:
        raise ValueError(
            'interval tuple encoder: expecting 3 elements '
            'in tuple, got {}'.format(len(obj)))
    months = obj[0]
    days = obj[1]
    microseconds = obj[2]
    buf.write_int32(16)
    buf.write_int64(microseconds)
    buf.write_int32(days)
    buf.write_int32(months)
cdef interval_decode(CodecContext settings, FRBuffer *buf):
    # Decode an `interval` into a timedelta.  timedelta cannot
    # represent calendar months, so months/years are approximated as
    # 30/365 days respectively.
    cdef:
        int32_t days
        int32_t months
        int32_t years
        int64_t seconds = 0
        int32_t microseconds = 0
    _decode_time(buf, &seconds, &microseconds)
    days = hton.unpack_int32(frb_read(buf, 4))
    months = hton.unpack_int32(frb_read(buf, 4))
    if months < 0:
        # Split negative month counts on the absolute value so the
        # year/month split truncates toward zero.
        years = -<int32_t>(-months // 12)
        months = -<int32_t>(-months % 12)
    else:
        years = <int32_t>(months // 12)
        months = <int32_t>(months % 12)
    return datetime.timedelta(days=days + months * 30 + years * 365,
                              seconds=seconds, microseconds=microseconds)
cdef interval_decode_tuple(CodecContext settings, FRBuffer *buf):
    # Return the raw (months, days, microseconds) interval triple
    # without any conversion.
    cdef:
        int64_t us = hton.unpack_int64(frb_read(buf, 8))
        int32_t day_count = hton.unpack_int32(frb_read(buf, 4))
        int32_t month_count = hton.unpack_int32(frb_read(buf, 4))
    return (month_count, day_count, us)

View File

@@ -0,0 +1,34 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
from libc cimport math
cdef float4_encode(CodecContext settings, WriteBuffer buf, obj):
    # Encode a Python float as a 4-byte float4.  A finite double that
    # becomes infinite when narrowed to float is out of range.
    cdef double dval = cpython.PyFloat_AsDouble(obj)
    cdef float fval = <float>dval
    if math.isinf(fval) and not math.isinf(dval):
        raise ValueError('value out of float32 range')
    buf.write_int32(4)
    buf.write_float(fval)
cdef float4_decode(CodecContext settings, FRBuffer *buf):
    # Decode a 4-byte big-endian float into a Python float.
    cdef float val = hton.unpack_float(frb_read(buf, 4))
    return cpython.PyFloat_FromDouble(val)
cdef float8_encode(CodecContext settings, WriteBuffer buf, obj):
    # Encode a Python float as an 8-byte float8 value.
    cdef double val = cpython.PyFloat_AsDouble(obj)
    buf.write_int32(8)
    buf.write_double(val)
cdef float8_decode(CodecContext settings, FRBuffer *buf):
    # Decode an 8-byte big-endian double into a Python float.
    cdef double val = hton.unpack_double(frb_read(buf, 8))
    return cpython.PyFloat_FromDouble(val)

View File

@@ -0,0 +1,164 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
cdef inline _encode_points(WriteBuffer wbuf, object points):
    # Serialize a sequence of (x, y) pairs as consecutive float8
    # values.
    cdef object pt
    for pt in points:
        wbuf.write_double(pt[0])
        wbuf.write_double(pt[1])
cdef inline _decode_points(FRBuffer *buf):
    # Read an int32 point count followed by that many (x, y) float8
    # pairs, returning them as a tuple of Point objects.
    cdef:
        int32_t npts = hton.unpack_int32(frb_read(buf, 4))
        pts = cpython.PyTuple_New(npts)
        int32_t i
        object point
        double x
        double y
    for i in range(npts):
        x = hton.unpack_double(frb_read(buf, 8))
        y = hton.unpack_double(frb_read(buf, 8))
        point = pgproto_types.Point(x, y)
        # PyTuple_SET_ITEM steals a reference; balance it explicitly.
        cpython.Py_INCREF(point)
        cpython.PyTuple_SET_ITEM(pts, i, point)
    return pts
cdef box_encode(CodecContext settings, WriteBuffer wbuf, obj):
    # Encode a box as its two corner points (32 bytes).
    wbuf.write_int32(32)
    _encode_points(wbuf, (obj[0], obj[1]))
cdef box_decode(CodecContext settings, FRBuffer *buf):
    # Decode a box as its high and low corner points.
    cdef:
        double high_x = hton.unpack_double(frb_read(buf, 8))
        double high_y = hton.unpack_double(frb_read(buf, 8))
        double low_x = hton.unpack_double(frb_read(buf, 8))
        double low_y = hton.unpack_double(frb_read(buf, 8))
    return pgproto_types.Box(
        pgproto_types.Point(high_x, high_y),
        pgproto_types.Point(low_x, low_y))
cdef line_encode(CodecContext settings, WriteBuffer wbuf, obj):
    # Encode a line as its three float8 coefficients (24 bytes).
    wbuf.write_int32(24)
    wbuf.write_double(obj[0])
    wbuf.write_double(obj[1])
    wbuf.write_double(obj[2])
cdef line_decode(CodecContext settings, FRBuffer *buf):
    # Decode a line as its three float8 coefficients.
    cdef:
        double a = hton.unpack_double(frb_read(buf, 8))
        double b = hton.unpack_double(frb_read(buf, 8))
        double c = hton.unpack_double(frb_read(buf, 8))
    return pgproto_types.Line(a, b, c)
cdef lseg_encode(CodecContext settings, WriteBuffer wbuf, obj):
    # Encode a line segment as its two endpoints (32 bytes).
    wbuf.write_int32(32)
    _encode_points(wbuf, (obj[0], obj[1]))
cdef lseg_decode(CodecContext settings, FRBuffer *buf):
    # Decode a line segment as a pair of (x, y) endpoints.
    cdef:
        double x1 = hton.unpack_double(frb_read(buf, 8))
        double y1 = hton.unpack_double(frb_read(buf, 8))
        double x2 = hton.unpack_double(frb_read(buf, 8))
        double y2 = hton.unpack_double(frb_read(buf, 8))
    return pgproto_types.LineSegment((x1, y1), (x2, y2))
cdef point_encode(CodecContext settings, WriteBuffer wbuf, obj):
    # Encode a point as an (x, y) pair of float8 values (16 bytes).
    wbuf.write_int32(16)
    wbuf.write_double(obj[0])
    wbuf.write_double(obj[1])
cdef point_decode(CodecContext settings, FRBuffer *buf):
    # Decode a point as an (x, y) pair of float8 values.
    cdef:
        double px = hton.unpack_double(frb_read(buf, 8))
        double py = hton.unpack_double(frb_read(buf, 8))
    return pgproto_types.Point(px, py)
cdef path_encode(CodecContext settings, WriteBuffer wbuf, obj):
    # Encode a PostgreSQL `path` value.  A tuple is treated as a
    # closed path, a list as an open one; pgproto_types.Path carries
    # its own is_closed flag.  Any other sequence type defaults to an
    # open path (is_closed stays 0).
    # (Removed the unused `int32_t i` local declaration.)
    cdef:
        int8_t is_closed = 0
        ssize_t npts
        ssize_t encoded_len
    if cpython.PyTuple_Check(obj):
        is_closed = 1
    elif cpython.PyList_Check(obj):
        is_closed = 0
    elif isinstance(obj, pgproto_types.Path):
        is_closed = obj.is_closed
    npts = len(obj)
    # 1 byte closed flag + 4 byte point count + 16 bytes per point.
    encoded_len = 1 + 4 + 16 * npts
    if encoded_len > _MAXINT32:
        raise ValueError('path value too long')
    wbuf.write_int32(<int32_t>encoded_len)
    wbuf.write_byte(is_closed)
    wbuf.write_int32(<int32_t>npts)
    _encode_points(wbuf, obj)
cdef path_decode(CodecContext settings, FRBuffer *buf):
    # Decode a path: a 1-byte closed flag followed by the point list.
    cdef:
        int8_t is_closed = <int8_t>(frb_read(buf, 1)[0])
    return pgproto_types.Path(*_decode_points(buf), is_closed=is_closed == 1)
cdef poly_encode(CodecContext settings, WriteBuffer wbuf, obj):
    # Encode a PostgreSQL `polygon` value: an int32 point count
    # followed by the points (polygons are always closed, so there is
    # no flag byte, unlike `path`).
    # (Removed the unused `bint is_closed` and `int32_t i` locals.)
    cdef:
        ssize_t npts
        ssize_t encoded_len
    npts = len(obj)
    # 4 byte point count + 16 bytes per point.
    encoded_len = 4 + 16 * npts
    if encoded_len > _MAXINT32:
        raise ValueError('polygon value too long')
    wbuf.write_int32(<int32_t>encoded_len)
    wbuf.write_int32(<int32_t>npts)
    _encode_points(wbuf, obj)
cdef poly_decode(CodecContext settings, FRBuffer *buf):
    # Decode a polygon as its list of vertex points.
    return pgproto_types.Polygon(*_decode_points(buf))
cdef circle_encode(CodecContext settings, WriteBuffer wbuf, obj):
    # Encode a circle as its center (x, y) plus radius (24 bytes).
    wbuf.write_int32(24)
    wbuf.write_double(obj[0][0])
    wbuf.write_double(obj[0][1])
    wbuf.write_double(obj[1])
cdef circle_decode(CodecContext settings, FRBuffer *buf):
    # Decode a circle as a center point followed by a radius.
    cdef:
        double cx = hton.unpack_double(frb_read(buf, 8))
        double cy = hton.unpack_double(frb_read(buf, 8))
        double r = hton.unpack_double(frb_read(buf, 8))
    return pgproto_types.Circle((cx, cy), r)

View File

@@ -0,0 +1,73 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
cdef hstore_encode(CodecContext settings, WriteBuffer buf, obj):
    # Encode a mapping (or an iterable of key/value pairs) as an
    # hstore value: int32 pair count, then length-prefixed key and
    # value strings; a value length of -1 denotes NULL.
    cdef:
        char *str
        ssize_t size
        ssize_t count
        object items
        WriteBuffer item_buf = WriteBuffer.new()
    count = len(obj)
    if count > _MAXINT32:
        raise ValueError('hstore value is too large')
    item_buf.write_int32(<int32_t>count)
    if hasattr(obj, 'items'):
        items = obj.items()
    else:
        items = obj
    for k, v in items:
        if k is None:
            raise ValueError('null value not allowed in hstore key')
        as_pg_string_and_size(settings, k, &str, &size)
        item_buf.write_int32(<int32_t>size)
        item_buf.write_cstr(str, size)
        if v is None:
            # NULL value marker.
            item_buf.write_int32(<int32_t>-1)
        else:
            as_pg_string_and_size(settings, v, &str, &size)
            item_buf.write_int32(<int32_t>size)
            item_buf.write_cstr(str, size)
    # Items were staged in item_buf so the total byte length can be
    # written as a prefix.
    buf.write_int32(item_buf.len())
    buf.write_buffer(item_buf)
cdef hstore_decode(CodecContext settings, FRBuffer *buf):
    # Decode an hstore value into a dict.  A value length of -1
    # denotes NULL; NULL keys are rejected.
    cdef:
        dict result
        uint32_t elem_count
        int32_t elem_len
        uint32_t i
        str k
        str v
    result = {}
    elem_count = <uint32_t>hton.unpack_int32(frb_read(buf, 4))
    if elem_count == 0:
        return result
    for i in range(elem_count):
        elem_len = hton.unpack_int32(frb_read(buf, 4))
        if elem_len < 0:
            raise ValueError('null value not allowed in hstore key')
        k = decode_pg_string(settings, frb_read(buf, elem_len), elem_len)
        elem_len = hton.unpack_int32(frb_read(buf, 4))
        if elem_len < 0:
            v = None
        else:
            v = decode_pg_string(settings, frb_read(buf, elem_len), elem_len)
        result[k] = v
    return result

View File

@@ -0,0 +1,144 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
cdef bool_encode(CodecContext settings, WriteBuffer buf, obj):
    # Encode a bool as a single byte; strictly requires a bool
    # instance (no implicit truthiness conversion).
    if not cpython.PyBool_Check(obj):
        raise TypeError('a boolean is required (got type {})'.format(
            type(obj).__name__))
    buf.write_int32(1)
    buf.write_byte(b'\x01' if obj is True else b'\x00')
cdef bool_decode(CodecContext settings, FRBuffer *buf):
    # A boolean is a single byte: 0x01 is true, anything else false.
    return frb_read(buf, 1)[0] is b'\x01'
cdef int2_encode(CodecContext settings, WriteBuffer buf, obj):
    # Encode a Python integer as a PostgreSQL int2 (2-byte) value.
    # Raises OverflowError if the value does not fit into int16.
    cdef int overflow = 0
    # Initialize to 0 for consistency with int4/uint4/uint8 encoders
    # and to avoid any read of an uninitialized C variable (the
    # range check only short-circuits past `val` when overflow is
    # set, but compilers may still warn).
    cdef long val = 0
    try:
        if type(obj) is not int and hasattr(type(obj), '__int__'):
            # Silence a Python warning about implicit __int__
            # conversion.
            obj = int(obj)
        val = cpython.PyLong_AsLong(obj)
    except OverflowError:
        overflow = 1
    if overflow or val < INT16_MIN or val > INT16_MAX:
        raise OverflowError('value out of int16 range')
    buf.write_int32(2)
    buf.write_int16(<int16_t>val)
cdef int2_decode(CodecContext settings, FRBuffer *buf):
    # Decode a 2-byte big-endian int16 into a Python int.
    cdef int16_t val = hton.unpack_int16(frb_read(buf, 2))
    return cpython.PyLong_FromLong(val)
cdef int4_encode(CodecContext settings, WriteBuffer buf, obj):
    # Encode a Python integer as a PostgreSQL int4 (4-byte) value.
    # Raises OverflowError if the value does not fit into int32.
    cdef int overflow = 0
    cdef long val = 0
    try:
        if type(obj) is not int and hasattr(type(obj), '__int__'):
            # Silence a Python warning about implicit __int__
            # conversion.
            obj = int(obj)
        val = cpython.PyLong_AsLong(obj)
    except OverflowError:
        overflow = 1
    # "long" and "long long" have the same size for x86_64, need an extra check
    if overflow or (sizeof(val) > 4 and (val < INT32_MIN or val > INT32_MAX)):
        raise OverflowError('value out of int32 range')
    buf.write_int32(4)
    buf.write_int32(<int32_t>val)
cdef int4_decode(CodecContext settings, FRBuffer *buf):
    # Decode a 4-byte big-endian int32 into a Python int.
    cdef int32_t val = hton.unpack_int32(frb_read(buf, 4))
    return cpython.PyLong_FromLong(val)
cdef uint4_encode(CodecContext settings, WriteBuffer buf, obj):
    # Encode a non-negative Python integer as an unsigned 4-byte
    # value.  Raises OverflowError if it does not fit into uint32.
    cdef int overflow = 0
    cdef unsigned long val = 0
    try:
        if type(obj) is not int and hasattr(type(obj), '__int__'):
            # Silence a Python warning about implicit __int__
            # conversion.
            obj = int(obj)
        val = cpython.PyLong_AsUnsignedLong(obj)
    except OverflowError:
        overflow = 1
    # "long" and "long long" have the same size for x86_64, need an extra check
    if overflow or (sizeof(val) > 4 and val > UINT32_MAX):
        raise OverflowError('value out of uint32 range')
    buf.write_int32(4)
    buf.write_int32(<int32_t>val)
cdef uint4_decode(CodecContext settings, FRBuffer *buf):
    # Decode 4 bytes as an unsigned 32-bit integer.
    cdef uint32_t val = <uint32_t>hton.unpack_int32(frb_read(buf, 4))
    return cpython.PyLong_FromUnsignedLong(val)
cdef int8_encode(CodecContext settings, WriteBuffer buf, obj):
    # Encode a Python integer as a PostgreSQL int8 (8-byte) value.
    # Raises OverflowError if the value does not fit into int64.
    cdef int overflow = 0
    # Initialize to 0 for consistency with the other integer encoders
    # and to avoid any read of an uninitialized C variable.
    cdef long long val = 0
    try:
        if type(obj) is not int and hasattr(type(obj), '__int__'):
            # Silence a Python warning about implicit __int__
            # conversion.
            obj = int(obj)
        val = cpython.PyLong_AsLongLong(obj)
    except OverflowError:
        overflow = 1
    # Just in case for systems with "long long" bigger than 8 bytes
    if overflow or (sizeof(val) > 8 and (val < INT64_MIN or val > INT64_MAX)):
        raise OverflowError('value out of int64 range')
    buf.write_int32(8)
    buf.write_int64(<int64_t>val)
cdef int8_decode(CodecContext settings, FRBuffer *buf):
    # Decode an 8-byte big-endian int64 into a Python int.
    cdef int64_t val = hton.unpack_int64(frb_read(buf, 8))
    return cpython.PyLong_FromLongLong(val)
cdef uint8_encode(CodecContext settings, WriteBuffer buf, obj):
    # Encode a non-negative Python integer as an unsigned 8-byte
    # value.  Raises OverflowError if it does not fit into uint64.
    cdef int overflow = 0
    cdef unsigned long long val = 0
    try:
        if type(obj) is not int and hasattr(type(obj), '__int__'):
            # Silence a Python warning about implicit __int__
            # conversion.
            obj = int(obj)
        val = cpython.PyLong_AsUnsignedLongLong(obj)
    except OverflowError:
        overflow = 1
    # Just in case for systems with "long long" bigger than 8 bytes
    if overflow or (sizeof(val) > 8 and val > UINT64_MAX):
        raise OverflowError('value out of uint64 range')
    buf.write_int32(8)
    buf.write_int64(<int64_t>val)
cdef uint8_decode(CodecContext settings, FRBuffer *buf):
    # Decode 8 bytes as an unsigned 64-bit integer.
    cdef uint64_t val = <uint64_t>hton.unpack_int64(frb_read(buf, 8))
    return cpython.PyLong_FromUnsignedLongLong(val)

View File

@@ -0,0 +1,57 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
cdef jsonb_encode(CodecContext settings, WriteBuffer buf, obj):
    # Encode a `jsonb` value: a 1-byte format version (always 1)
    # followed by the JSON text.  If a JSON encoder is configured,
    # the object is serialized first.
    cdef:
        char *str
        ssize_t size
    if settings.is_encoding_json():
        obj = settings.get_json_encoder().encode(obj)
    as_pg_string_and_size(settings, obj, &str, &size)
    # Reserve one byte of the int32 length for the version byte.
    if size > 0x7fffffff - 1:
        raise ValueError('string too long')
    buf.write_int32(<int32_t>size + 1)
    buf.write_byte(1)  # JSONB format version
    buf.write_cstr(str, size)
cdef jsonb_decode(CodecContext settings, FRBuffer *buf):
    # Decode a `jsonb` value: validate the 1-byte format version,
    # then decode the JSON text (optionally deserializing it).
    cdef uint8_t format = <uint8_t>(frb_read(buf, 1)[0])
    if format != 1:
        raise ValueError('unexpected JSONB format: {}'.format(format))
    rv = text_decode(settings, buf)
    if settings.is_decoding_json():
        rv = settings.get_json_decoder().decode(rv)
    return rv
cdef json_encode(CodecContext settings, WriteBuffer buf, obj):
    # Encode a `json` value.  Unlike `jsonb` there is no version
    # byte: the value is plain text, so after optional serialization
    # this simply delegates to the text encoder.
    # (Removed the unused `char *str` / `ssize_t size` locals, which
    # were never referenced in this function.)
    if settings.is_encoding_json():
        obj = settings.get_json_encoder().encode(obj)
    text_encode(settings, buf, obj)
cdef json_decode(CodecContext settings, FRBuffer *buf):
    # Decode a `json` value: plain text, optionally deserialized.
    decoded = text_decode(settings, buf)
    if settings.is_decoding_json():
        decoded = settings.get_json_decoder().decode(decoded)
    return decoded

View File

@@ -0,0 +1,29 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
cdef jsonpath_encode(CodecContext settings, WriteBuffer buf, obj):
    # Encode a `jsonpath` value: a 1-byte format version (always 1)
    # followed by the path text.
    cdef:
        char *str
        ssize_t size
    as_pg_string_and_size(settings, obj, &str, &size)
    # Reserve one byte of the int32 length for the version byte.
    if size > 0x7fffffff - 1:
        raise ValueError('string too long')
    buf.write_int32(<int32_t>size + 1)
    buf.write_byte(1)  # jsonpath format version
    buf.write_cstr(str, size)
cdef jsonpath_decode(CodecContext settings, FRBuffer *buf):
    # Decode a `jsonpath` value: validate the 1-byte format version,
    # then return the path text.
    cdef uint8_t format = <uint8_t>(frb_read(buf, 1)[0])
    if format != 1:
        raise ValueError('unexpected jsonpath format: {}'.format(format))
    return text_decode(settings, buf)

View File

@@ -0,0 +1,16 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
cdef void_encode(CodecContext settings, WriteBuffer buf, obj):
    # Void is zero bytes
    buf.write_int32(0)
cdef void_decode(CodecContext settings, FRBuffer *buf):
    # Do nothing; void will be passed as NULL so this function
    # will never be called.
    pass

View File

@@ -0,0 +1,139 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
import ipaddress
# defined in postgresql/src/include/inet.h
#
DEF PGSQL_AF_INET = 2 # AF_INET
DEF PGSQL_AF_INET6 = 3 # AF_INET + 1
# Short aliases for the ipaddress constructors used by the codecs.
_ipaddr = ipaddress.ip_address
_ipiface = ipaddress.ip_interface
_ipnet = ipaddress.ip_network
cdef inline uint8_t _ip_max_prefix_len(int32_t family):
    # Maximum number of bits in the network prefix for the given
    # IP protocol family (IPv4: 32, otherwise IPv6: 128).
    return 32 if family == PGSQL_AF_INET else 128
cdef inline int32_t _ip_addr_len(int32_t family):
    # Address payload size in bytes for the given IP protocol family
    # (IPv4: 4, otherwise IPv6: 16).
    return 4 if family == PGSQL_AF_INET else 16
cdef inline int8_t _ver_to_family(int32_t version):
    # Map an ipaddress object's `version` (4 or 6) to PostgreSQL's
    # address-family constant.
    return PGSQL_AF_INET if version == 4 else PGSQL_AF_INET6
cdef inline _net_encode(WriteBuffer buf, int8_t family, uint32_t bits,
                        int8_t is_cidr, bytes addr):
    # Write the common inet/cidr wire format: family byte, prefix
    # bits, cidr flag, address length, then the raw address bytes.
    cdef:
        char *addrbytes
        ssize_t addrlen
    cpython.PyBytes_AsStringAndSize(addr, &addrbytes, &addrlen)
    buf.write_int32(4 + <int32_t>addrlen)
    buf.write_byte(family)
    buf.write_byte(<int8_t>bits)
    buf.write_byte(is_cidr)
    buf.write_byte(<int8_t>addrlen)
    buf.write_cstr(addrbytes, addrlen)
cdef net_decode(CodecContext settings, FRBuffer *buf, bint as_cidr):
    # Decode a PostgreSQL inet/cidr value into an ipaddress object.
    # Wire format: family byte, prefix-length byte, cidr-flag byte,
    # address-length byte, then the raw address bytes.
    #
    # Removed the redundant `_ip_max_prefix_len(family)` call that was
    # previously executed in the declaration initializer (i.e. before
    # the family was validated); its result was discarded and
    # recomputed after validation anyway.
    cdef:
        int32_t family = <int32_t>frb_read(buf, 1)[0]
        uint8_t bits = <uint8_t>frb_read(buf, 1)[0]
        int prefix_len
        int32_t is_cidr = <int32_t>frb_read(buf, 1)[0]
        int32_t addrlen = <int32_t>frb_read(buf, 1)[0]
        bytes addr
        uint8_t max_prefix_len
    if is_cidr != as_cidr:
        raise ValueError('unexpected CIDR flag set in non-cidr value')
    if family != PGSQL_AF_INET and family != PGSQL_AF_INET6:
        raise ValueError('invalid address family in "{}" value'.format(
            'cidr' if is_cidr else 'inet'
        ))
    # Compute the prefix bound only once the family is known valid.
    max_prefix_len = _ip_max_prefix_len(family)
    if bits > max_prefix_len:
        raise ValueError('invalid network prefix length in "{}" value'.format(
            'cidr' if is_cidr else 'inet'
        ))
    if addrlen != _ip_addr_len(family):
        raise ValueError('invalid address length in "{}" value'.format(
            'cidr' if is_cidr else 'inet'
        ))
    addr = cpython.PyBytes_FromStringAndSize(frb_read(buf, addrlen), addrlen)
    if as_cidr or bits != max_prefix_len:
        # An explicit (non-host) prefix: return a network/interface.
        prefix_len = cpython.PyLong_FromLong(bits)
        if as_cidr:
            return _ipnet((addr, prefix_len))
        else:
            return _ipiface((addr, prefix_len))
    else:
        # Full-length prefix on an inet value: a plain host address.
        return _ipaddr(addr)
cdef cidr_encode(CodecContext settings, WriteBuffer buf, obj):
    # Encode a network value (anything ip_network() accepts) as
    # a `cidr`.
    cdef:
        object ipnet
        int8_t family
    ipnet = _ipnet(obj)
    family = _ver_to_family(ipnet.version)
    _net_encode(buf, family, ipnet.prefixlen, 1, ipnet.network_address.packed)
cdef cidr_decode(CodecContext settings, FRBuffer *buf):
    # Decode a `cidr` value into an ip_network object.
    return net_decode(settings, buf, True)
cdef inet_encode(CodecContext settings, WriteBuffer buf, obj):
    # Encode an address or interface value as `inet`.  Plain host
    # addresses are written with a full-length prefix; values that
    # carry an explicit prefix are encoded as interfaces.
    cdef:
        object ipaddr
        int8_t family
    try:
        ipaddr = _ipaddr(obj)
    except ValueError:
        # PostgreSQL accepts *both* CIDR and host values
        # for the host datatype.
        ipaddr = _ipiface(obj)
        family = _ver_to_family(ipaddr.version)
        _net_encode(buf, family, ipaddr.network.prefixlen, 1, ipaddr.packed)
    else:
        family = _ver_to_family(ipaddr.version)
        _net_encode(buf, family, _ip_max_prefix_len(family), 0, ipaddr.packed)
cdef inet_decode(CodecContext settings, FRBuffer *buf):
    # Decode an `inet` value into an ip_address/ip_interface object.
    return net_decode(settings, buf, False)

View File

@@ -0,0 +1,356 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
from libc.math cimport abs, log10
from libc.stdio cimport snprintf
import decimal
# defined in postgresql/src/backend/utils/adt/numeric.c
DEF DEC_DIGITS = 4  # decimal digits per Postgres base-10000 digit
DEF MAX_DSCALE = 0x3FFF
# Sign-field values of the binary numeric format.
DEF NUMERIC_POS = 0x0000
DEF NUMERIC_NEG = 0x4000
DEF NUMERIC_NAN = 0xC000
DEF NUMERIC_PINF = 0xD000
DEF NUMERIC_NINF = 0xF000
_Dec = decimal.Decimal
cdef numeric_encode_text(CodecContext settings, WriteBuffer buf, obj):
    # Text-format numeric encoder: just the str() of the value.
    text_encode(settings, buf, str(obj))
cdef numeric_decode_text(CodecContext settings, FRBuffer *buf):
    # Text-format numeric decoder: parse the text into a Decimal.
    return _Dec(text_decode(settings, buf))
cdef numeric_encode_binary(CodecContext settings, WriteBuffer buf, obj):
    # Encode a Decimal (or anything Decimal() accepts) in the binary
    # numeric format: header (digit count, weight, sign, dscale)
    # followed by base-10000 digits, two bytes each.
    cdef:
        object dec
        object dt
        int64_t exponent
        int64_t i
        int64_t j
        tuple pydigits
        int64_t num_pydigits
        int16_t pgdigit
        int64_t num_pgdigits
        int16_t dscale
        int64_t dweight
        int64_t weight
        uint16_t sign
        int64_t padding_size = 0
    if isinstance(obj, _Dec):
        dec = obj
    else:
        dec = _Dec(obj)
    # Decimal.as_tuple() exposes specials via the exponent field:
    # 'n'/'N' for (s)NaN and 'F' for infinity.
    dt = dec.as_tuple()
    if dt.exponent == 'n' or dt.exponent == 'N':
        # NaN
        sign = NUMERIC_NAN
        num_pgdigits = 0
        weight = 0
        dscale = 0
    elif dt.exponent == 'F':
        # Infinity
        if dt.sign:
            sign = NUMERIC_NINF
        else:
            sign = NUMERIC_PINF
        num_pgdigits = 0
        weight = 0
        dscale = 0
    else:
        exponent = dt.exponent
        if exponent < 0 and -exponent > MAX_DSCALE:
            raise ValueError(
                'cannot encode Decimal value into numeric: '
                'exponent is too small')
        if dt.sign:
            sign = NUMERIC_NEG
        else:
            sign = NUMERIC_POS
        pydigits = dt.digits
        num_pydigits = len(pydigits)
        # Weight of the most significant base-10000 digit.
        dweight = num_pydigits + exponent - 1
        if dweight >= 0:
            weight = (dweight + DEC_DIGITS) // DEC_DIGITS - 1
        else:
            weight = -((-dweight - 1) // DEC_DIGITS + 1)
        if weight > 2 ** 16 - 1:
            raise ValueError(
                'cannot encode Decimal value into numeric: '
                'exponent is too large')
        padding_size = \
            (weight + 1) * DEC_DIGITS - (dweight + 1)
        num_pgdigits = \
            (num_pydigits + padding_size + DEC_DIGITS - 1) // DEC_DIGITS
        if num_pgdigits > 2 ** 16 - 1:
            raise ValueError(
                'cannot encode Decimal value into numeric: '
                'number of digits is too large')
        # Pad decimal digits to provide room for correct Postgres
        # digit alignment in the digit computation loop.
        pydigits = (0,) * DEC_DIGITS + pydigits + (0,) * DEC_DIGITS
        if exponent < 0:
            if -exponent > MAX_DSCALE:
                raise ValueError(
                    'cannot encode Decimal value into numeric: '
                    'exponent is too small')
            dscale = <int16_t>-exponent
        else:
            dscale = 0
    # Header: total length, digit count, weight, sign, display scale.
    buf.write_int32(2 + 2 + 2 + 2 + 2 * <uint16_t>num_pgdigits)
    buf.write_int16(<int16_t>num_pgdigits)
    buf.write_int16(<int16_t>weight)
    buf.write_int16(<int16_t>sign)
    buf.write_int16(dscale)
    # Fold each group of four decimal digits into one base-10000 digit.
    j = DEC_DIGITS - padding_size
    for i in range(num_pgdigits):
        pgdigit = (pydigits[j] * 1000 + pydigits[j + 1] * 100 +
                   pydigits[j + 2] * 10 + pydigits[j + 3])
        j += DEC_DIGITS
        buf.write_int16(pgdigit)
# The decoding strategy here is to form a string representation of
# the numeric var, as it is faster than passing an iterable of digits.
# For this reason the below code is pure overhead and is ~25% slower
# than the simple text decoder above. That said, we need the binary
# decoder to support binary COPY with numeric values.
cdef numeric_decode_binary_ex(
    CodecContext settings,
    FRBuffer *buf,
    bint trail_fract_zero,
):
    # Decode a Postgres binary ``numeric`` value into a Python Decimal.
    #
    # Wire layout: int16 ndigits, int16 weight, int16 sign, int16 dscale,
    # followed by ndigits base-10000 digits (int16 each).  The value is
    # rendered into a char buffer in scientific notation
    # ("[-]0.DDD...E[+-]X") and handed to Decimal as a string, which is
    # faster than building a digit tuple.
    #
    # If *trail_fract_zero* is true, an extra trailing zero is appended
    # when needed so the rendered value always has a fractional digit.
    cdef:
        uint16_t num_pgdigits = <uint16_t>hton.unpack_int16(frb_read(buf, 2))
        int16_t weight = hton.unpack_int16(frb_read(buf, 2))
        uint16_t sign = <uint16_t>hton.unpack_int16(frb_read(buf, 2))
        uint16_t dscale = <uint16_t>hton.unpack_int16(frb_read(buf, 2))

        int16_t pgdigit0
        ssize_t i
        int16_t pgdigit
        object pydigits
        ssize_t num_pydigits
        ssize_t actual_num_pydigits
        ssize_t buf_size
        int64_t exponent
        int64_t abs_exponent
        ssize_t exponent_chars
        ssize_t front_padding = 0
        ssize_t num_fract_digits
        ssize_t trailing_fract_zeros_adj
        char smallbuf[_NUMERIC_DECODER_SMALLBUF_SIZE]
        char *charbuf
        char *bufptr
        bint buf_allocated = False

    if sign == NUMERIC_NAN:
        # Not-a-number
        return _Dec('NaN')
    elif sign == NUMERIC_PINF:
        # +Infinity
        return _Dec('Infinity')
    elif sign == NUMERIC_NINF:
        # -Infinity
        return _Dec('-Infinity')

    if num_pgdigits == 0:
        # Zero
        return _Dec('0e-' + str(dscale))

    pgdigit0 = hton.unpack_int16(frb_read(buf, 2))
    if weight >= 0:
        # Count the leading decimal zeros of the first base-10000 digit;
        # these must not be rendered into the output.
        if pgdigit0 < 10:
            front_padding = 3
        elif pgdigit0 < 100:
            front_padding = 2
        elif pgdigit0 < 1000:
            front_padding = 1

    # The number of fractional decimal digits actually encoded in
    # base-DEC_DIGITS digits sent by Postgres.
    num_fract_digits = (num_pgdigits - weight - 1) * DEC_DIGITS

    # The trailing zero adjustment necessary to obtain exactly
    # dscale number of fractional digits in output.  May be negative,
    # which indicates that trailing zeros in the last input digit
    # should be discarded.
    trailing_fract_zeros_adj = dscale - num_fract_digits

    # Maximum possible number of decimal digits in base 10.
    # The actual number might be up to 3 digits smaller due to
    # leading zeros in first input digit.
    num_pydigits = num_pgdigits * DEC_DIGITS
    if trailing_fract_zeros_adj > 0:
        num_pydigits += trailing_fract_zeros_adj

    # Exponent.
    exponent = (weight + 1) * DEC_DIGITS - front_padding
    abs_exponent = abs(exponent)
    if abs_exponent != 0:
        # Number of characters required to render absolute exponent value
        # in decimal.
        exponent_chars = <ssize_t>log10(<double>abs_exponent) + 1
    else:
        exponent_chars = 0

    # Output buffer size.
    buf_size = (
        1 +                 # sign
        1 +                 # leading zero
        1 +                 # decimal dot
        num_pydigits +      # digits
        1 +                 # possible trailing zero padding
        2 +                 # exponent indicator (E-,E+)
        exponent_chars +    # exponent
        1                   # null terminator char
    )

    if buf_size > _NUMERIC_DECODER_SMALLBUF_SIZE:
        # Too large for the stack buffer; fall back to the heap.
        charbuf = <char *>cpython.PyMem_Malloc(<size_t>buf_size)
        buf_allocated = True
    else:
        charbuf = smallbuf

    try:
        bufptr = charbuf

        if sign == NUMERIC_NEG:
            bufptr[0] = b'-'
            bufptr += 1

        # All digits are rendered after "0." and the true magnitude is
        # restored by the exponent written below.
        bufptr[0] = b'0'
        bufptr[1] = b'.'
        bufptr += 2

        if weight >= 0:
            bufptr = _unpack_digit_stripping_lzeros(bufptr, pgdigit0)
        else:
            bufptr = _unpack_digit(bufptr, pgdigit0)

        for i in range(1, num_pgdigits):
            pgdigit = hton.unpack_int16(frb_read(buf, 2))
            bufptr = _unpack_digit(bufptr, pgdigit)

        if dscale:
            if trailing_fract_zeros_adj > 0:
                for i in range(trailing_fract_zeros_adj):
                    bufptr[i] = <char>b'0'

            # If display scale is _less_ than the number of rendered digits,
            # trailing_fract_zeros_adj will be negative and this will strip
            # the excess trailing zeros.
            bufptr += trailing_fract_zeros_adj

        if trail_fract_zero:
            # Check if the number of rendered digits matches the exponent,
            # and if so, add another trailing zero, so the result always
            # appears with a decimal point.
            actual_num_pydigits = bufptr - charbuf - 2
            if sign == NUMERIC_NEG:
                actual_num_pydigits -= 1

            if actual_num_pydigits == abs_exponent:
                bufptr[0] = <char>b'0'
                bufptr += 1

        if exponent != 0:
            bufptr[0] = b'E'
            if exponent < 0:
                bufptr[1] = b'-'
            else:
                bufptr[1] = b'+'
            bufptr += 2
            snprintf(bufptr, <size_t>exponent_chars + 1, '%d',
                     <int>abs_exponent)
            bufptr += exponent_chars

        # NUL-terminate so PyUnicode_FromString can consume the buffer.
        bufptr[0] = 0

        pydigits = cpythonx.PyUnicode_FromString(charbuf)

        return _Dec(pydigits)

    finally:
        if buf_allocated:
            cpython.PyMem_Free(charbuf)
cdef numeric_decode_binary(CodecContext settings, FRBuffer *buf):
    # Default binary ``numeric`` decoder: no forced trailing fractional zero.
    return numeric_decode_binary_ex(settings, buf, False)
cdef inline char *_unpack_digit_stripping_lzeros(char *buf, int64_t pgdigit):
    # Render a base-10000 digit as up to four ASCII decimal characters,
    # omitting leading zeros, and return the advanced buffer pointer.
    # Once the first non-zero decimal digit is seen, every subsequent
    # digit (including zeros) is written.
    cdef:
        int64_t d
        bint significant

    d = pgdigit // 1000
    significant = (d > 0)
    if significant:
        pgdigit -= d * 1000
        buf[0] = <char>(d + <int32_t>b'0')
        buf += 1

    d = pgdigit // 100
    significant |= (d > 0)
    if significant:
        pgdigit -= d * 100
        buf[0] = <char>(d + <int32_t>b'0')
        buf += 1

    d = pgdigit // 10
    significant |= (d > 0)
    if significant:
        pgdigit -= d * 10
        buf[0] = <char>(d + <int32_t>b'0')
        buf += 1

    # The ones digit is always written, so a zero input still renders "0".
    buf[0] = <char>(pgdigit + <int32_t>b'0')
    buf += 1

    return buf
cdef inline char *_unpack_digit(char *buf, int64_t pgdigit):
    # Render a base-10000 digit as exactly four ASCII decimal characters,
    # zero-padded on the left, and return the advanced buffer pointer.
    cdef:
        int64_t rest = pgdigit
        int64_t q

    q = rest // 1000
    buf[0] = <char>(q + <int32_t>b'0')
    rest -= q * 1000

    q = rest // 100
    buf[1] = <char>(q + <int32_t>b'0')
    rest -= q * 100

    q = rest // 10
    buf[2] = <char>(q + <int32_t>b'0')
    rest -= q * 10

    buf[3] = <char>(rest + <int32_t>b'0')
    return buf + 4

View File

@@ -0,0 +1,63 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
cdef pg_snapshot_encode(CodecContext settings, WriteBuffer buf, obj):
    # Encode an (xmin, xmax, xip_list) triple as a Postgres
    # ``txid_snapshot``/``pg_snapshot`` value.
    #
    # Wire layout: int32 length, int32 nxip, int64 xmin, int64 xmax,
    # followed by nxip int64 in-progress transaction ids.
    cdef:
        ssize_t nxip
        uint64_t xmin
        uint64_t xmax
        int i
        WriteBuffer xip_buf = WriteBuffer.new()

    if not (cpython.PyTuple_Check(obj) or cpython.PyList_Check(obj)):
        raise TypeError(
            'list or tuple expected (got type {})'.format(type(obj)))

    if len(obj) != 3:
        # Bugfix: the message previously claimed "expecting 4" although
        # the check (and the wire format) require exactly 3 elements.
        raise ValueError(
            'invalid number of elements in txid_snapshot tuple, expecting 3')

    nxip = len(obj[2])
    if nxip > _MAXINT32:
        raise ValueError('txid_snapshot value is too long')

    xmin = obj[0]
    xmax = obj[1]

    # Stage the xip list in a side buffer so its encoded size is known
    # before the length prefix is written.
    for i in range(nxip):
        xip_buf.write_int64(
            <int64_t>cpython.PyLong_AsUnsignedLongLong(obj[2][i]))

    # 20 == 4 (nxip) + 8 (xmin) + 8 (xmax).
    buf.write_int32(20 + xip_buf.len())

    buf.write_int32(<int32_t>nxip)
    buf.write_int64(<int64_t>xmin)
    buf.write_int64(<int64_t>xmax)
    buf.write_buffer(xip_buf)
cdef pg_snapshot_decode(CodecContext settings, FRBuffer *buf):
    # Decode a Postgres ``txid_snapshot``/``pg_snapshot`` value into an
    # (xmin, xmax, xip_tuple) Python tuple.
    cdef:
        int32_t nxip
        uint64_t xmin
        uint64_t xmax
        tuple xip_tup
        int32_t i
        object xip

    nxip = hton.unpack_int32(frb_read(buf, 4))
    xmin = <uint64_t>hton.unpack_int64(frb_read(buf, 8))
    xmax = <uint64_t>hton.unpack_int64(frb_read(buf, 8))

    # Build the xip tuple directly via the C API to avoid intermediate
    # list allocation; SET_ITEM steals a reference, hence the INCREF.
    xip_tup = cpython.PyTuple_New(nxip)
    for i in range(nxip):
        xip = cpython.PyLong_FromUnsignedLongLong(
            <uint64_t>hton.unpack_int64(frb_read(buf, 8)))
        cpython.Py_INCREF(xip)
        cpython.PyTuple_SET_ITEM(xip_tup, i, xip)

    return (xmin, xmax, xip_tup)

View File

@@ -0,0 +1,48 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
cdef inline as_pg_string_and_size(
        CodecContext settings, obj, char **cstr, ssize_t *size):
    # Obtain a C string and its byte length for *obj*, encoded in the
    # connection's client encoding.  For UTF-8 connections this borrows
    # the string's internal UTF-8 buffer without copying.
    if not cpython.PyUnicode_Check(obj):
        raise TypeError('expected str, got {}'.format(type(obj).__name__))

    if settings.is_encoding_utf8():
        cstr[0] = <char*>cpythonx.PyUnicode_AsUTF8AndSize(obj, size)
    else:
        encoded = settings.get_text_codec().encode(obj)[0]
        cpython.PyBytes_AsStringAndSize(encoded, cstr, size)

    # Postgres length prefixes are signed 32-bit.
    if size[0] > 0x7fffffff:
        raise ValueError('string too long')
cdef text_encode(CodecContext settings, WriteBuffer buf, obj):
    # Encode a str as a length-prefixed Postgres text value.
    cdef:
        char *str
        ssize_t size

    as_pg_string_and_size(settings, obj, &str, &size)
    buf.write_int32(<int32_t>size)
    buf.write_cstr(str, size)
cdef inline decode_pg_string(CodecContext settings, const char* data,
                             ssize_t len):
    # Decode *len* bytes at *data* into a str using the connection's
    # client encoding.
    if settings.is_encoding_utf8():
        # decode UTF-8 in strict mode
        return cpython.PyUnicode_DecodeUTF8(data, len, NULL)
    else:
        bytes = cpython.PyBytes_FromStringAndSize(data, len)
        return settings.get_text_codec().decode(bytes)[0]
cdef text_decode(CodecContext settings, FRBuffer *buf):
    # Decode a Postgres text value by consuming the whole buffer.
    # Capture the length first: frb_read_all() resets buf.len to zero.
    cdef ssize_t buf_len = buf.len
    return decode_pg_string(settings, frb_read_all(buf), buf_len)

View File

@@ -0,0 +1,51 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
cdef tid_encode(CodecContext settings, WriteBuffer buf, obj):
    # Encode a (block, offset) pair as a Postgres ``tid`` value:
    # int32 length (6), uint32 block number, uint16 tuple offset.
    cdef int overflow = 0
    cdef unsigned long block, offset

    if not (cpython.PyTuple_Check(obj) or cpython.PyList_Check(obj)):
        raise TypeError(
            'list or tuple expected (got type {})'.format(type(obj)))

    if len(obj) != 2:
        raise ValueError(
            'invalid number of elements in tid tuple, expecting 2')

    try:
        block = cpython.PyLong_AsUnsignedLong(obj[0])
    except OverflowError:
        overflow = 1

    # "long" and "long long" have the same size for x86_64, need an extra check
    if overflow or (sizeof(block) > 4 and block > UINT32_MAX):
        raise OverflowError('tuple id block value out of uint32 range')

    try:
        offset = cpython.PyLong_AsUnsignedLong(obj[1])
        overflow = 0
    except OverflowError:
        overflow = 1

    if overflow or offset > 65535:
        raise OverflowError('tuple id offset value out of uint16 range')

    buf.write_int32(6)
    buf.write_int32(<int32_t>block)
    buf.write_int16(<int16_t>offset)
cdef tid_decode(CodecContext settings, FRBuffer *buf):
    # Decode a Postgres ``tid`` value into a (block, offset) tuple.
    cdef:
        uint32_t block
        uint16_t offset

    block = <uint32_t>hton.unpack_int32(frb_read(buf, 4))
    offset = <uint16_t>hton.unpack_int16(frb_read(buf, 2))

    return (block, offset)

View File

@@ -0,0 +1,27 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
cdef uuid_encode(CodecContext settings, WriteBuffer wbuf, obj):
    # Encode a UUID value as int32 length (16) plus 16 raw bytes.
    # Accepts the fast pg_UUID type, a textual UUID, or any object with
    # a ``bytes`` attribute (e.g. the standard uuid.UUID).
    cdef:
        char buf[16]

    if type(obj) is pg_UUID:
        # Fast path: copy the raw bytes directly, no parsing needed.
        wbuf.write_int32(<int32_t>16)
        wbuf.write_cstr((<UUID>obj)._data, 16)
    elif cpython.PyUnicode_Check(obj):
        # Parse the textual UUID into the stack buffer.
        pg_uuid_bytes_from_str(obj, buf)
        wbuf.write_int32(<int32_t>16)
        wbuf.write_cstr(buf, 16)
    else:
        bytea_encode(settings, wbuf, obj.bytes)
cdef uuid_decode(CodecContext settings, FRBuffer *buf):
    # Decode a binary UUID value (exactly 16 bytes) into a pg_UUID.
    if buf.len != 16:
        raise TypeError(
            f'cannot decode UUID, expected 16 bytes, got {buf.len}')
    return pg_uuid_from_buf(frb_read_all(buf))

View File

@@ -0,0 +1,12 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
# Size of the preallocated per-WriteBuffer small buffer.
DEF _BUFFER_INITIAL_SIZE = 1024
# Presumably caps how much a buffer grows per reallocation — see buffer.pyx.
DEF _BUFFER_MAX_GROW = 65536
# Presumably the size of the WriteBuffer freelist — see buffer.pyx.
DEF _BUFFER_FREELIST_SIZE = 256
# Largest value representable as a signed 32-bit integer.
DEF _MAXINT32 = 2**31 - 1
# Stack buffer size used by the binary numeric decoder.
DEF _NUMERIC_DECODER_SMALLBUF_SIZE = 256

View File

@@ -0,0 +1,23 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
from cpython cimport Py_buffer
# Declarations for CPython C-API functions not exposed by the bundled
# ``cpython`` cimport package.
cdef extern from "Python.h":
    int PyUnicode_1BYTE_KIND

    int PyByteArray_CheckExact(object)
    int PyByteArray_Resize(object, ssize_t) except -1
    object PyByteArray_FromStringAndSize(const char *, ssize_t)
    char* PyByteArray_AsString(object)

    object PyUnicode_FromString(const char *u)
    const char* PyUnicode_AsUTF8AndSize(
        object unicode, ssize_t *size) except NULL

    object PyUnicode_FromKindAndData(
        int kind, const void *buffer, Py_ssize_t size)

View File

@@ -0,0 +1,10 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
cdef extern from "debug.h":
    # Protocol-debugging flag; presumably set at build time in debug.h —
    # confirm against that header.
    cdef int PG_DEBUG

View File

@@ -0,0 +1,48 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
cdef:
    # Fixed read buffer: a non-owning cursor over *len* bytes at *buf*.
    struct FRBuffer:
        const char* buf
        ssize_t len

    inline ssize_t frb_get_len(FRBuffer *frb):
        return frb.len

    inline void frb_set_len(FRBuffer *frb, ssize_t new_len):
        frb.len = new_len

    inline void frb_init(FRBuffer *frb, const char *buf, ssize_t len):
        frb.buf = buf
        frb.len = len

    # Return a pointer to the next *n* bytes and advance the cursor;
    # frb_check raises if fewer than *n* bytes remain.
    inline const char* frb_read(FRBuffer *frb, ssize_t n) except NULL:
        cdef const char *result
        frb_check(frb, n)

        result = frb.buf
        frb.buf += n
        frb.len -= n

        return result

    # Consume and return a pointer to all remaining bytes.
    inline const char* frb_read_all(FRBuffer *frb):
        cdef const char *result
        result = frb.buf
        frb.buf += frb.len
        frb.len = 0
        return result

    # Initialize *frb* as a view over the next *len* bytes of *source*,
    # consuming them from *source*.
    inline FRBuffer *frb_slice_from(FRBuffer *frb,
                                    FRBuffer* source, ssize_t len):
        frb.buf = frb_read(source, len)
        frb.len = len
        return frb

    object frb_check(FRBuffer *frb, ssize_t n)

View File

@@ -0,0 +1,12 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
cdef object frb_check(FRBuffer *frb, ssize_t n):
    # Raise if the buffer holds fewer than *n* unread bytes.
    if n > frb.len:
        raise AssertionError(
            f'insufficient data in buffer: requested {n} '
            f'remaining {frb.len}')

View File

@@ -0,0 +1,24 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
from libc.stdint cimport int16_t, int32_t, uint16_t, uint32_t, int64_t, uint64_t
# Network-byte-order (big-endian) pack/unpack helpers implemented in hton.h.
cdef extern from "./hton.h":
    cdef void pack_int16(char *buf, int16_t x);
    cdef void pack_int32(char *buf, int32_t x);
    cdef void pack_int64(char *buf, int64_t x);
    cdef void pack_float(char *buf, float f);
    cdef void pack_double(char *buf, double f);
    cdef int16_t unpack_int16(const char *buf);
    cdef uint16_t unpack_uint16(const char *buf);
    cdef int32_t unpack_int32(const char *buf);
    cdef uint32_t unpack_uint32(const char *buf);
    cdef int64_t unpack_int64(const char *buf);
    cdef uint64_t unpack_uint64(const char *buf);
    cdef float unpack_float(const char *buf);
    cdef double unpack_double(const char *buf);

View File

@@ -0,0 +1,19 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
cimport cython
cimport cpython
from libc.stdint cimport int16_t, int32_t, uint16_t, uint32_t, int64_t, uint64_t
include "./consts.pxi"
include "./frb.pxd"
include "./buffer.pxd"
include "./codecs/__init__.pxd"

View File

@@ -0,0 +1,49 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
cimport cython
cimport cpython
from . cimport cpythonx
from libc.stdint cimport int8_t, uint8_t, int16_t, uint16_t, \
int32_t, uint32_t, int64_t, uint64_t, \
INT16_MIN, INT16_MAX, INT32_MIN, INT32_MAX, \
UINT32_MAX, INT64_MIN, INT64_MAX, UINT64_MAX
from . cimport hton
from . cimport tohex
from .debug cimport PG_DEBUG
from . import types as pgproto_types
include "./consts.pxi"
include "./frb.pyx"
include "./buffer.pyx"
include "./uuid.pyx"
include "./codecs/context.pyx"
include "./codecs/bytea.pyx"
include "./codecs/text.pyx"
include "./codecs/datetime.pyx"
include "./codecs/float.pyx"
include "./codecs/int.pyx"
include "./codecs/json.pyx"
include "./codecs/jsonpath.pyx"
include "./codecs/uuid.pyx"
include "./codecs/numeric.pyx"
include "./codecs/bits.pyx"
include "./codecs/geometry.pyx"
include "./codecs/hstore.pyx"
include "./codecs/misc.pyx"
include "./codecs/network.pyx"
include "./codecs/tid.pyx"
include "./codecs/pg_snapshot.pyx"

View File

@@ -0,0 +1,10 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
# Fast UUID-to-text helpers implemented in tohex.h.
cdef extern from "./tohex.h":
    # Render 16 raw bytes as the 36-char dashed UUID form.
    cdef void uuid_to_str(const char *source, char *dest)
    # Render 16 raw bytes as 32 hex characters (no dashes).
    cdef void uuid_to_hex(const char *source, char *dest)

View File

@@ -0,0 +1,423 @@
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
import builtins
import sys
import typing
if sys.version_info >= (3, 8):
from typing import Literal, SupportsIndex
else:
from typing_extensions import Literal, SupportsIndex
__all__ = (
'BitString', 'Point', 'Path', 'Polygon',
'Box', 'Line', 'LineSegment', 'Circle',
)
_BitString = typing.TypeVar('_BitString', bound='BitString')
_BitOrderType = Literal['big', 'little']
class BitString:
    """Immutable representation of PostgreSQL `bit` and `varbit` types."""

    # Internal storage: MSB-first packed bytes plus the exact bit count
    # (the final byte may be only partially significant).
    __slots__ = '_bytes', '_bitlength'

    def __init__(self,
                 bitstring: typing.Optional[builtins.bytes] = None) -> None:
        # Despite the annotation, the loop below compares elements against
        # the one-character string ' ' and passes them to int(), so this
        # accepts a str of '0'/'1' characters with optional spaces —
        # TODO confirm intended input type against callers.
        if not bitstring:
            self._bytes = bytes()
            self._bitlength = 0
        else:
            # Over-allocate by one byte; trimmed below once the true
            # length (excluding spaces) is known.
            bytelen = len(bitstring) // 8 + 1
            bytes_ = bytearray(bytelen)
            byte = 0
            byte_pos = 0
            bit_pos = 0

            for i, bit in enumerate(bitstring):
                if bit == ' ':  # type: ignore
                    continue
                bit = int(bit)
                if bit != 0 and bit != 1:
                    raise ValueError(
                        'invalid bit value at position {}'.format(i))

                # Pack MSB-first within each byte.
                byte |= bit << (8 - bit_pos - 1)
                bit_pos += 1
                if bit_pos == 8:
                    bytes_[byte_pos] = byte
                    byte = 0
                    byte_pos += 1
                    bit_pos = 0

            if bit_pos != 0:
                # Flush the final partial byte.
                bytes_[byte_pos] = byte

            bitlen = byte_pos * 8 + bit_pos
            bytelen = byte_pos + (1 if bit_pos else 0)

            self._bytes = bytes(bytes_[:bytelen])
            self._bitlength = bitlen

    @classmethod
    def frombytes(cls: typing.Type[_BitString],
                  bytes_: typing.Optional[builtins.bytes] = None,
                  bitlength: typing.Optional[int] = None) -> _BitString:
        # Construct from pre-packed bytes; *bitlength* bounds must fall
        # within the last byte of *bytes_* (bytes_len - 8 < bitlength
        # <= bytes_len).
        if bitlength is None:
            if bytes_ is None:
                bytes_ = bytes()
                bitlength = 0
            else:
                bitlength = len(bytes_) * 8
        else:
            if bytes_ is None:
                bytes_ = bytes(bitlength // 8 + 1)
                bitlength = bitlength
            else:
                bytes_len = len(bytes_) * 8

                if bytes_len == 0 and bitlength != 0:
                    raise ValueError('invalid bit length specified')

                if bytes_len != 0 and bitlength == 0:
                    raise ValueError('invalid bit length specified')

                if bitlength < bytes_len - 8:
                    raise ValueError('invalid bit length specified')

                if bitlength > bytes_len:
                    raise ValueError('invalid bit length specified')

        result = cls()
        result._bytes = bytes_
        result._bitlength = bitlength
        return result

    @property
    def bytes(self) -> builtins.bytes:
        # The raw MSB-first packed representation.
        return self._bytes

    def as_string(self) -> str:
        # Render as '0'/'1' characters grouped in blocks of four.
        s = ''

        for i in range(self._bitlength):
            s += str(self._getitem(i))
            if i % 4 == 3:
                s += ' '

        return s.strip()

    def to_int(self, bitorder: _BitOrderType = 'big',
               *, signed: bool = False) -> int:
        """Interpret the BitString as a Python int.
        Acts similarly to int.from_bytes.

        :param bitorder:
            Determines the bit order used to interpret the BitString. By
            default, this function uses Postgres conventions for casting bits
            to ints. If bitorder is 'big', the most significant bit is at the
            start of the string (this is the same as the default). If bitorder
            is 'little', the most significant bit is at the end of the string.

        :param bool signed:
            Determines whether two's complement is used to interpret the
            BitString. If signed is False, the returned value is always
            non-negative.

        :return int: An integer representing the BitString. Information about
                     the BitString's exact length is lost.

        .. versionadded:: 0.18.0
        """
        x = int.from_bytes(self._bytes, byteorder='big')
        # Drop the unused padding bits of the final byte.
        x >>= -self._bitlength % 8
        if bitorder == 'big':
            pass
        elif bitorder == 'little':
            # Reverse the bit sequence via its binary string form.
            x = int(bin(x)[:1:-1].ljust(self._bitlength, '0'), 2)
        else:
            raise ValueError("bitorder must be either 'big' or 'little'")

        if signed and self._bitlength > 0 and x & (1 << (self._bitlength - 1)):
            x -= 1 << self._bitlength
        return x

    @classmethod
    def from_int(cls: typing.Type[_BitString], x: int, length: int,
                 bitorder: _BitOrderType = 'big', *, signed: bool = False) \
            -> _BitString:
        """Represent the Python int x as a BitString.
        Acts similarly to int.to_bytes.

        :param int x:
            An integer to represent. Negative integers are represented in two's
            complement form, unless the argument signed is False, in which case
            negative integers raise an OverflowError.

        :param int length:
            The length of the resulting BitString. An OverflowError is raised
            if the integer is not representable in this many bits.

        :param bitorder:
            Determines the bit order used in the BitString representation. By
            default, this function uses Postgres conventions for casting ints
            to bits. If bitorder is 'big', the most significant bit is at the
            start of the string (this is the same as the default). If bitorder
            is 'little', the most significant bit is at the end of the string.

        :param bool signed:
            Determines whether two's complement is used in the BitString
            representation. If signed is False and a negative integer is given,
            an OverflowError is raised.

        :return BitString: A BitString representing the input integer, in the
                           form specified by the other input args.

        .. versionadded:: 0.18.0
        """
        # Exception types are by analogy to int.to_bytes
        if length < 0:
            raise ValueError("length argument must be non-negative")
        elif length < x.bit_length():
            raise OverflowError("int too big to convert")

        if x < 0:
            if not signed:
                raise OverflowError("can't convert negative int to unsigned")
            # Two's complement within *length* bits.
            x &= (1 << length) - 1

        if bitorder == 'big':
            pass
        elif bitorder == 'little':
            x = int(bin(x)[:1:-1].ljust(length, '0'), 2)
        else:
            raise ValueError("bitorder must be either 'big' or 'little'")

        # Left-align the bits within the final partial byte.
        x <<= (-length % 8)
        bytes_ = x.to_bytes((length + 7) // 8, byteorder='big')
        return cls.frombytes(bytes_, length)

    def __repr__(self) -> str:
        return '<BitString {}>'.format(self.as_string())

    __str__: typing.Callable[['BitString'], str] = __repr__

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, BitString):
            return NotImplemented

        return (self._bytes == other._bytes and
                self._bitlength == other._bitlength)

    def __hash__(self) -> int:
        return hash((self._bytes, self._bitlength))

    def _getitem(self, i: int) -> int:
        # Extract bit *i*, counting from the most significant bit of the
        # first byte.
        byte = self._bytes[i // 8]
        shift = 8 - i % 8 - 1
        return (byte >> shift) & 0x1

    def __getitem__(self, i: int) -> int:
        if isinstance(i, slice):
            raise NotImplementedError('BitString does not support slices')

        # NOTE(review): negative indices are not validated here and fall
        # through to Python's negative byte indexing — confirm intent.
        if i >= self._bitlength:
            raise IndexError('index out of range')

        return self._getitem(i)

    def __len__(self) -> int:
        return self._bitlength
class Point(typing.Tuple[float, float]):
    """Immutable representation of PostgreSQL `point` type."""

    __slots__ = ()

    def __new__(cls,
                x: typing.Union[typing.SupportsFloat,
                                SupportsIndex,
                                typing.Text,
                                builtins.bytes,
                                builtins.bytearray],
                y: typing.Union[typing.SupportsFloat,
                                SupportsIndex,
                                typing.Text,
                                builtins.bytes,
                                builtins.bytearray]) -> 'Point':
        # Coerce both coordinates to float before building the tuple.
        coords = (float(x), float(y))
        return super().__new__(cls, typing.cast(typing.Any, coords))

    def __repr__(self) -> str:
        kind = type(self)
        return '{}.{}({})'.format(
            kind.__module__, kind.__name__, tuple.__repr__(self))

    @property
    def x(self) -> float:
        """The first (horizontal) coordinate."""
        return self[0]

    @property
    def y(self) -> float:
        """The second (vertical) coordinate."""
        return self[1]
class Box(typing.Tuple[Point, Point]):
    """Immutable representation of PostgreSQL `box` type."""

    __slots__ = ()

    def __new__(cls, high: typing.Sequence[float],
                low: typing.Sequence[float]) -> 'Box':
        # Both corners are normalized to Point instances.
        corners = (Point(*high), Point(*low))
        return super().__new__(cls, typing.cast(typing.Any, corners))

    def __repr__(self) -> str:
        kind = type(self)
        return '{}.{}({})'.format(
            kind.__module__, kind.__name__, tuple.__repr__(self))

    @property
    def high(self) -> Point:
        """The first ("high") corner of the box."""
        return self[0]

    @property
    def low(self) -> Point:
        """The second ("low") corner of the box."""
        return self[1]
class Line(typing.Tuple[float, float, float]):
    """Immutable representation of PostgreSQL `line` type.

    A line is represented by the coefficients (A, B, C) of its
    equation ``Ax + By + C = 0``.
    """

    __slots__ = ()

    def __new__(cls, A: float, B: float, C: float) -> 'Line':
        return super().__new__(cls, typing.cast(typing.Any, (A, B, C)))

    def __repr__(self) -> str:
        # Added for consistency with Point/Box/LineSegment, which all
        # render a module-qualified repr.
        return '{}.{}({})'.format(
            type(self).__module__,
            type(self).__name__,
            tuple.__repr__(self)
        )

    @property
    def A(self) -> float:
        """The ``A`` coefficient of the line equation."""
        return self[0]

    @property
    def B(self) -> float:
        """The ``B`` coefficient of the line equation."""
        return self[1]

    @property
    def C(self) -> float:
        """The ``C`` coefficient of the line equation."""
        return self[2]
class LineSegment(typing.Tuple[Point, Point]):
    """Immutable representation of PostgreSQL `lseg` type."""

    __slots__ = ()

    def __new__(cls, p1: typing.Sequence[float],
                p2: typing.Sequence[float]) -> 'LineSegment':
        # Normalize both endpoints to Point instances.
        endpoints = (Point(*p1), Point(*p2))
        return super().__new__(cls, typing.cast(typing.Any, endpoints))

    def __repr__(self) -> str:
        kind = type(self)
        return '{}.{}({})'.format(
            kind.__module__, kind.__name__, tuple.__repr__(self))

    @property
    def p1(self) -> Point:
        """The first endpoint of the segment."""
        return self[0]

    @property
    def p2(self) -> Point:
        """The second endpoint of the segment."""
        return self[1]
class Path:
    """Immutable representation of PostgreSQL `path` type."""

    __slots__ = '_is_closed', 'points'

    points: typing.Tuple[Point, ...]

    def __init__(self, *points: typing.Sequence[float],
                 is_closed: bool = False) -> None:
        # Each argument is a coordinate sequence; normalize to Points.
        self.points = tuple(Point(*coords) for coords in points)
        self._is_closed = is_closed

    @property
    def is_closed(self) -> bool:
        """Whether the path's last point connects back to its first."""
        return self._is_closed

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Path):
            return NotImplemented

        return (self.points == other.points
                and self._is_closed == other._is_closed)

    def __hash__(self) -> int:
        return hash((self.points, self.is_closed))

    def __iter__(self) -> typing.Iterator[Point]:
        return iter(self.points)

    def __len__(self) -> int:
        return len(self.points)

    @typing.overload
    def __getitem__(self, i: int) -> Point:
        ...

    @typing.overload
    def __getitem__(self, i: slice) -> typing.Tuple[Point, ...]:
        ...

    def __getitem__(self, i: typing.Union[int, slice]) \
            -> typing.Union[Point, typing.Tuple[Point, ...]]:
        return self.points[i]

    def __contains__(self, point: object) -> bool:
        return point in self.points
class Polygon(Path):
    """Immutable representation of PostgreSQL `polygon` type."""

    __slots__ = ()

    def __init__(self, *points: typing.Sequence[float]) -> None:
        # polygon is always closed
        super().__init__(*points, is_closed=True)
class Circle(typing.Tuple[Point, float]):
    """Immutable representation of PostgreSQL `circle` type."""

    __slots__ = ()

    def __new__(cls, center: Point, radius: float) -> 'Circle':
        payload = (center, radius)
        return super().__new__(cls, typing.cast(typing.Any, payload))

    @property
    def center(self) -> Point:
        """The center point of the circle."""
        return self[0]

    @property
    def radius(self) -> float:
        """The radius of the circle."""
        return self[1]

View File

@@ -0,0 +1,353 @@
import functools
import uuid
cimport cython
cimport cpython
from libc.stdint cimport uint8_t, int8_t
from libc.string cimport memcpy, memcmp
cdef extern from "Python.h":
int PyUnicode_1BYTE_KIND
const char* PyUnicode_AsUTF8AndSize(
object unicode, Py_ssize_t *size) except NULL
object PyUnicode_FromKindAndData(
int kind, const void *buffer, Py_ssize_t size)
cdef extern from "./tohex.h":
cdef void uuid_to_str(const char *source, char *dest)
cdef void uuid_to_hex(const char *source, char *dest)
# A more efficient UUID type implementation
# (6-7x faster than the standard uuid.UUID):
#
# -= Benchmark results (less is better): =-
#
# std_UUID(bytes): 1.2368
# c_UUID(bytes): * 0.1645 (7.52x)
# object(): 0.1483
#
# std_UUID(str): 1.8038
# c_UUID(str): * 0.2313 (7.80x)
#
# str(std_UUID()): 1.4625
# str(c_UUID()): * 0.2681 (5.46x)
# str(object()): 0.5975
#
# std_UUID().bytes: 0.3508
# c_UUID().bytes: * 0.1068 (3.28x)
#
# std_UUID().int: 0.0871
# c_UUID().int: * 0.0856
#
# std_UUID().hex: 0.4871
# c_UUID().hex: * 0.1405
#
# hash(std_UUID()): 0.3635
# hash(c_UUID()): * 0.1564 (2.32x)
#
# dct[std_UUID()]: 0.3319
# dct[c_UUID()]: * 0.1570 (2.11x)
#
# std_UUID() ==: 0.3478
# c_UUID() ==: * 0.0915 (3.80x)
# Maps an ASCII byte value to its hex-nibble value (0..15);
# -1 marks bytes that are not hexadecimal digits.
cdef char _hextable[256]
_hextable[:] = [
    -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
    -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
    -1,-1, 0,1,2,3,4,5,6,7,8,9,-1,-1,-1,-1,-1,-1,-1,10,11,12,13,14,15,-1,
    -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
    -1,-1,10,11,12,13,14,15,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
    -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
    -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
    -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
    -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
    -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
    -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1
]

# Module-level alias for the standard-library UUID class.
cdef std_UUID = uuid.UUID
cdef pg_uuid_bytes_from_str(str u, char *out):
    # Parse a textual UUID (32..36 characters; dashes are skipped wherever
    # they appear) into 16 raw bytes written to *out*.
    # Raises ValueError on malformed input.
    cdef:
        const char *orig_buf
        Py_ssize_t size
        unsigned char ch
        uint8_t acc, part, acc_set
        int i, j

    orig_buf = PyUnicode_AsUTF8AndSize(u, &size)
    if size > 36 or size < 32:
        raise ValueError(
            f'invalid UUID {u!r}: '
            f'length must be between 32..36 characters, got {size}')

    acc_set = 0
    j = 0
    for i in range(size):
        ch = <unsigned char>orig_buf[i]
        if ch == <unsigned char>b'-':
            continue

        part = <uint8_t><int8_t>_hextable[ch]
        if part == <uint8_t>-1:
            if ch >= 0x20 and ch <= 0x7e:
                # Printable offender: include it in the message.
                raise ValueError(
                    f'invalid UUID {u!r}: unexpected character {chr(ch)!r}')
            else:
                # Bugfix: this string was missing the f-prefix and rendered
                # the literal text '{u!r}' instead of the value.
                raise ValueError(f'invalid UUID {u!r}: unexpected character')

        if acc_set:
            # Low nibble: complete the current byte.
            acc |= part
            out[j] = <char>acc
            acc_set = 0
            j += 1
        else:
            # High nibble of the next byte.
            acc = <uint8_t>(part << 4)
            acc_set = 1

        # Fail early so we never write past the 16-byte output buffer.
        if j > 16 or (j == 16 and acc_set):
            raise ValueError(
                f'invalid UUID {u!r}: decodes to more than 16 bytes')

    if j != 16:
        raise ValueError(
            f'invalid UUID {u!r}: decodes to less than 16 bytes')
cdef class __UUIDReplaceMe:
    # Placeholder base class; swapped for uuid.UUID in UUID.__bases__
    # by the module-level patch below the UUID class definition.
    pass
cdef pg_uuid_from_buf(const char *buf):
    # Build a UUID directly from 16 raw bytes, bypassing __init__
    # validation for speed.
    cdef:
        UUID u = UUID.__new__(UUID)

    memcpy(u._data, buf, 16)
    return u
@cython.final
@cython.no_gc_clear
cdef class UUID(__UUIDReplaceMe):
    # Fast UUID implementation that stores the value as 16 raw bytes in
    # a fixed C array, with lazily-computed ``int`` and hash caches.
    # The base class is patched to uuid.UUID at module load (see the
    # module-level hack following this class).

    cdef:
        char _data[16]      # raw big-endian UUID bytes
        object _int         # cached int value, computed on first access
        object _hash        # cached hash, computed on first access
        object __weakref__

    def __cinit__(self):
        self._int = None
        self._hash = None

    def __init__(self, inp):
        # Accept either 16 raw bytes or a 32..36 character UUID string.
        cdef:
            char *buf
            Py_ssize_t size

        if cpython.PyBytes_Check(inp):
            cpython.PyBytes_AsStringAndSize(inp, &buf, &size)
            if size != 16:
                raise ValueError(f'16 bytes were expected, got {size}')
            memcpy(self._data, buf, 16)
        elif cpython.PyUnicode_Check(inp):
            pg_uuid_bytes_from_str(inp, self._data)
        else:
            raise TypeError(f'a bytes or str object expected, got {inp!r}')

    @property
    def bytes(self):
        # The UUID as 16 big-endian bytes.
        return cpython.PyBytes_FromStringAndSize(self._data, 16)

    @property
    def int(self):
        if self._int is None:
            # The cache is important because `self.int` can be
            # used multiple times by __hash__ etc.
            self._int = int.from_bytes(self.bytes, 'big')
        return self._int

    @property
    def is_safe(self):
        # Provenance is unknown for UUIDs constructed from raw data.
        return uuid.SafeUUID.unknown

    def __str__(self):
        # Canonical 36-character dashed form, rendered by the C helper.
        cdef char out[36]
        uuid_to_str(self._data, out)
        return PyUnicode_FromKindAndData(
            PyUnicode_1BYTE_KIND, <void*>out, 36)

    @property
    def hex(self):
        # 32 hex characters, no dashes.
        cdef char out[32]
        uuid_to_hex(self._data, out)
        return PyUnicode_FromKindAndData(
            PyUnicode_1BYTE_KIND, <void*>out, 32)

    def __repr__(self):
        return f"UUID('{self}')"

    def __reduce__(self):
        # Pickle via the bytes constructor.
        return (type(self), (self.bytes,))

    def __eq__(self, other):
        if type(other) is UUID:
            # Fast path: compare the raw byte arrays directly.
            return memcmp(self._data, (<UUID>other)._data, 16) == 0
        if isinstance(other, std_UUID):
            # Interoperate with the standard-library UUID via its int form.
            return self.int == other.int
        return NotImplemented

    def __ne__(self, other):
        if type(other) is UUID:
            return memcmp(self._data, (<UUID>other)._data, 16) != 0
        if isinstance(other, std_UUID):
            return self.int != other.int
        return NotImplemented

    def __lt__(self, other):
        if type(other) is UUID:
            return memcmp(self._data, (<UUID>other)._data, 16) < 0
        if isinstance(other, std_UUID):
            return self.int < other.int
        return NotImplemented

    def __gt__(self, other):
        if type(other) is UUID:
            return memcmp(self._data, (<UUID>other)._data, 16) > 0
        if isinstance(other, std_UUID):
            return self.int > other.int
        return NotImplemented

    def __le__(self, other):
        if type(other) is UUID:
            return memcmp(self._data, (<UUID>other)._data, 16) <= 0
        if isinstance(other, std_UUID):
            return self.int <= other.int
        return NotImplemented

    def __ge__(self, other):
        if type(other) is UUID:
            return memcmp(self._data, (<UUID>other)._data, 16) >= 0
        if isinstance(other, std_UUID):
            return self.int >= other.int
        return NotImplemented

    def __hash__(self):
        # In EdgeDB every schema object has a uuid and there are
        # huge hash-maps of them. We want UUID.__hash__ to be
        # as fast as possible.
        if self._hash is not None:
            return self._hash

        self._hash = hash(self.int)
        return self._hash

    def __int__(self):
        return self.int

    @property
    def bytes_le(self):
        # Little-endian layout: first three fields byte-swapped, the
        # remainder unchanged (mirrors uuid.UUID.bytes_le).
        bytes = self.bytes
        return (bytes[4-1::-1] + bytes[6-1:4-1:-1] + bytes[8-1:6-1:-1] +
                bytes[8:])

    @property
    def fields(self):
        return (self.time_low, self.time_mid, self.time_hi_version,
                self.clock_seq_hi_variant, self.clock_seq_low, self.node)

    @property
    def time_low(self):
        return self.int >> 96

    @property
    def time_mid(self):
        return (self.int >> 80) & 0xffff

    @property
    def time_hi_version(self):
        return (self.int >> 64) & 0xffff

    @property
    def clock_seq_hi_variant(self):
        return (self.int >> 56) & 0xff

    @property
    def clock_seq_low(self):
        return (self.int >> 48) & 0xff

    @property
    def time(self):
        return (((self.time_hi_version & 0x0fff) << 48) |
                (self.time_mid << 32) | self.time_low)

    @property
    def clock_seq(self):
        return (((self.clock_seq_hi_variant & 0x3f) << 8) |
                self.clock_seq_low)

    @property
    def node(self):
        return self.int & 0xffffffffffff

    @property
    def urn(self):
        return 'urn:uuid:' + str(self)

    @property
    def variant(self):
        # Variant is determined by the high bits of clock_seq_hi_variant,
        # mirroring uuid.UUID.variant.
        if not self.int & (0x8000 << 48):
            return uuid.RESERVED_NCS
        elif not self.int & (0x4000 << 48):
            return uuid.RFC_4122
        elif not self.int & (0x2000 << 48):
            return uuid.RESERVED_MICROSOFT
        else:
            return uuid.RESERVED_FUTURE

    @property
    def version(self):
        # The version bits are only meaningful for RFC 4122 UUIDs.
        # Implicitly returns None for other variants (as uuid.UUID does).
        if self.variant == uuid.RFC_4122:
            return int((self.int >> 76) & 0xf)
# <hack>
# In order for `isinstance(pgproto.UUID, uuid.UUID)` to work,
# patch __bases__ and __mro__ by injecting `uuid.UUID`.
#
# We apply brute-force here because the following pattern stopped
# working with Python 3.8:
#
# cdef class OurUUID:
# ...
#
# class UUID(OurUUID, uuid.UUID):
# ...
#
# With Python 3.8 it now produces
#
# "TypeError: multiple bases have instance lay-out conflict"
#
# error. Maybe it's possible to fix this some other way, but
# the best solution possible would be to just contribute our
# faster UUID to the standard library and not have this problem
# at all. For now this hack is pretty safe and should be
# compatible with future Pythons for long enough.
#
# Sanity-check that the placeholder base is where we expect it before
# overwriting it in place.
assert UUID.__bases__[0] is __UUIDReplaceMe
assert UUID.__mro__[1] is __UUIDReplaceMe

# PyTuple_SET_ITEM steals a reference, hence the INCREFs; the old
# __UUIDReplaceMe references are deliberately leaked (one-time patch).
cpython.Py_INCREF(std_UUID)
cpython.PyTuple_SET_ITEM(UUID.__bases__, 0, std_UUID)
cpython.Py_INCREF(std_UUID)
cpython.PyTuple_SET_ITEM(UUID.__mro__, 1, std_UUID)
# </hack>

# Module-level alias used by the codecs (e.g. uuid_encode's fast path).
cdef pg_UUID = UUID