Major fixes and new features
All checks were successful
continuous-integration/drone/push Build is passing

This commit is contained in:
2025-09-25 15:51:48 +09:00
parent dd7349bb4c
commit ddce9f5125
5586 changed files with 1470941 additions and 0 deletions

View File

@@ -0,0 +1,46 @@
from __future__ import absolute_import
# Mapping of Kafka protocol api_key -> human-readable API name, per the
# Apache Kafka protocol guide. Used for logging/debugging request types.
# BUGFIX: keys 34 (AlterReplicaLogDirs) and 35 (DescribeLogDirs) were
# missing although every other key in 0..42 was present.
API_KEYS = {
    0: 'Produce',
    1: 'Fetch',
    2: 'ListOffsets',
    3: 'Metadata',
    4: 'LeaderAndIsr',
    5: 'StopReplica',
    6: 'UpdateMetadata',
    7: 'ControlledShutdown',
    8: 'OffsetCommit',
    9: 'OffsetFetch',
    10: 'FindCoordinator',
    11: 'JoinGroup',
    12: 'Heartbeat',
    13: 'LeaveGroup',
    14: 'SyncGroup',
    15: 'DescribeGroups',
    16: 'ListGroups',
    17: 'SaslHandshake',
    18: 'ApiVersions',
    19: 'CreateTopics',
    20: 'DeleteTopics',
    21: 'DeleteRecords',
    22: 'InitProducerId',
    23: 'OffsetForLeaderEpoch',
    24: 'AddPartitionsToTxn',
    25: 'AddOffsetsToTxn',
    26: 'EndTxn',
    27: 'WriteTxnMarkers',
    28: 'TxnOffsetCommit',
    29: 'DescribeAcls',
    30: 'CreateAcls',
    31: 'DeleteAcls',
    32: 'DescribeConfigs',
    33: 'AlterConfigs',
    34: 'AlterReplicaLogDirs',
    35: 'DescribeLogDirs',
    36: 'SaslAuthenticate',
    37: 'CreatePartitions',
    38: 'CreateDelegationToken',
    39: 'RenewDelegationToken',
    40: 'ExpireDelegationToken',
    41: 'DescribeDelegationToken',
    42: 'DeleteGroups',
}

View File

@@ -0,0 +1,19 @@
from __future__ import absolute_import
import abc
class AbstractType(object):
    """Base class for serializable protocol wire types.

    Subclasses supply ``encode``/``decode`` for one primitive type; ``repr``
    may be overridden to customize debug output.
    """
    __metaclass__ = abc.ABCMeta  # py2-style ABC declaration

    @abc.abstractmethod
    def encode(cls, value):  # pylint: disable=no-self-argument
        """Serialize ``value`` to bytes."""

    @abc.abstractmethod
    def decode(cls, data):  # pylint: disable=no-self-argument
        """Deserialize one value from ``data``."""

    @classmethod
    def repr(cls, value):
        """Debug representation; defaults to the builtin repr()."""
        return repr(value)

View File

@@ -0,0 +1,925 @@
from __future__ import absolute_import
from kafka.protocol.api import Request, Response
from kafka.protocol.types import Array, Boolean, Bytes, Int8, Int16, Int32, Int64, Schema, String
# ApiVersions (api_key 18): broker reports the request versions it supports.
class ApiVersionResponse_v0(Response):
    API_KEY = 18
    API_VERSION = 0
    SCHEMA = Schema(
        ('error_code', Int16),
        ('api_versions', Array(
            ('api_key', Int16),
            ('min_version', Int16),
            ('max_version', Int16)))
    )


# v1 adds throttle_time_ms.
class ApiVersionResponse_v1(Response):
    API_KEY = 18
    API_VERSION = 1
    SCHEMA = Schema(
        ('error_code', Int16),
        ('api_versions', Array(
            ('api_key', Int16),
            ('min_version', Int16),
            ('max_version', Int16))),
        ('throttle_time_ms', Int32)
    )


class ApiVersionResponse_v2(Response):
    API_KEY = 18
    API_VERSION = 2
    SCHEMA = ApiVersionResponse_v1.SCHEMA  # wire format unchanged


class ApiVersionRequest_v0(Request):
    API_KEY = 18
    API_VERSION = 0
    RESPONSE_TYPE = ApiVersionResponse_v0
    SCHEMA = Schema()  # empty request body


class ApiVersionRequest_v1(Request):
    API_KEY = 18
    API_VERSION = 1
    RESPONSE_TYPE = ApiVersionResponse_v1
    SCHEMA = ApiVersionRequest_v0.SCHEMA


class ApiVersionRequest_v2(Request):
    API_KEY = 18
    API_VERSION = 2
    # BUGFIX: was ApiVersionResponse_v1 — a v2 request should decode with the
    # v2 response class (schemas are identical, so the wire format is safe).
    RESPONSE_TYPE = ApiVersionResponse_v2
    SCHEMA = ApiVersionRequest_v0.SCHEMA


# Version lists indexed by api_version.
ApiVersionRequest = [
    ApiVersionRequest_v0, ApiVersionRequest_v1, ApiVersionRequest_v2,
]
ApiVersionResponse = [
    ApiVersionResponse_v0, ApiVersionResponse_v1, ApiVersionResponse_v2,
]
# CreateTopics (api_key 19): admin request to create topics.
class CreateTopicsResponse_v0(Response):
    API_KEY = 19
    API_VERSION = 0
    SCHEMA = Schema(
        ('topic_errors', Array(
            ('topic', String('utf-8')),
            ('error_code', Int16)))
    )


# v1 adds a per-topic error_message.
class CreateTopicsResponse_v1(Response):
    API_KEY = 19
    API_VERSION = 1
    SCHEMA = Schema(
        ('topic_errors', Array(
            ('topic', String('utf-8')),
            ('error_code', Int16),
            ('error_message', String('utf-8'))))
    )


# v2 adds top-level throttle_time_ms.
class CreateTopicsResponse_v2(Response):
    API_KEY = 19
    API_VERSION = 2
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('topic_errors', Array(
            ('topic', String('utf-8')),
            ('error_code', Int16),
            ('error_message', String('utf-8'))))
    )


class CreateTopicsResponse_v3(Response):
    API_KEY = 19
    API_VERSION = 3
    SCHEMA = CreateTopicsResponse_v2.SCHEMA  # wire format unchanged


class CreateTopicsRequest_v0(Request):
    API_KEY = 19
    API_VERSION = 0
    RESPONSE_TYPE = CreateTopicsResponse_v0
    SCHEMA = Schema(
        ('create_topic_requests', Array(
            ('topic', String('utf-8')),
            ('num_partitions', Int32),
            ('replication_factor', Int16),
            ('replica_assignment', Array(
                ('partition_id', Int32),
                ('replicas', Array(Int32)))),
            ('configs', Array(
                ('config_key', String('utf-8')),
                ('config_value', String('utf-8')))))),
        ('timeout', Int32)
    )


# v1 adds the validate_only (dry-run) flag.
class CreateTopicsRequest_v1(Request):
    API_KEY = 19
    API_VERSION = 1
    RESPONSE_TYPE = CreateTopicsResponse_v1
    SCHEMA = Schema(
        ('create_topic_requests', Array(
            ('topic', String('utf-8')),
            ('num_partitions', Int32),
            ('replication_factor', Int16),
            ('replica_assignment', Array(
                ('partition_id', Int32),
                ('replicas', Array(Int32)))),
            ('configs', Array(
                ('config_key', String('utf-8')),
                ('config_value', String('utf-8')))))),
        ('timeout', Int32),
        ('validate_only', Boolean)
    )


class CreateTopicsRequest_v2(Request):
    API_KEY = 19
    API_VERSION = 2
    RESPONSE_TYPE = CreateTopicsResponse_v2
    SCHEMA = CreateTopicsRequest_v1.SCHEMA


class CreateTopicsRequest_v3(Request):
    API_KEY = 19
    API_VERSION = 3
    RESPONSE_TYPE = CreateTopicsResponse_v3
    SCHEMA = CreateTopicsRequest_v1.SCHEMA


# Version lists indexed by api_version.
CreateTopicsRequest = [
    CreateTopicsRequest_v0, CreateTopicsRequest_v1,
    CreateTopicsRequest_v2, CreateTopicsRequest_v3,
]
CreateTopicsResponse = [
    CreateTopicsResponse_v0, CreateTopicsResponse_v1,
    CreateTopicsResponse_v2, CreateTopicsResponse_v3,
]
# DeleteTopics (api_key 20): admin request to delete topics.
class DeleteTopicsResponse_v0(Response):
    API_KEY = 20
    API_VERSION = 0
    SCHEMA = Schema(
        ('topic_error_codes', Array(
            ('topic', String('utf-8')),
            ('error_code', Int16)))
    )


# v1 adds top-level throttle_time_ms.
class DeleteTopicsResponse_v1(Response):
    API_KEY = 20
    API_VERSION = 1
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('topic_error_codes', Array(
            ('topic', String('utf-8')),
            ('error_code', Int16)))
    )


class DeleteTopicsResponse_v2(Response):
    API_KEY = 20
    API_VERSION = 2
    SCHEMA = DeleteTopicsResponse_v1.SCHEMA  # wire format unchanged


class DeleteTopicsResponse_v3(Response):
    API_KEY = 20
    API_VERSION = 3
    SCHEMA = DeleteTopicsResponse_v1.SCHEMA


class DeleteTopicsRequest_v0(Request):
    API_KEY = 20
    API_VERSION = 0
    RESPONSE_TYPE = DeleteTopicsResponse_v0
    SCHEMA = Schema(
        ('topics', Array(String('utf-8'))),
        ('timeout', Int32)
    )


class DeleteTopicsRequest_v1(Request):
    API_KEY = 20
    API_VERSION = 1
    RESPONSE_TYPE = DeleteTopicsResponse_v1
    SCHEMA = DeleteTopicsRequest_v0.SCHEMA


class DeleteTopicsRequest_v2(Request):
    API_KEY = 20
    API_VERSION = 2
    RESPONSE_TYPE = DeleteTopicsResponse_v2
    SCHEMA = DeleteTopicsRequest_v0.SCHEMA


class DeleteTopicsRequest_v3(Request):
    API_KEY = 20
    API_VERSION = 3
    RESPONSE_TYPE = DeleteTopicsResponse_v3
    SCHEMA = DeleteTopicsRequest_v0.SCHEMA


# Version lists indexed by api_version.
DeleteTopicsRequest = [
    DeleteTopicsRequest_v0, DeleteTopicsRequest_v1,
    DeleteTopicsRequest_v2, DeleteTopicsRequest_v3,
]
DeleteTopicsResponse = [
    DeleteTopicsResponse_v0, DeleteTopicsResponse_v1,
    DeleteTopicsResponse_v2, DeleteTopicsResponse_v3,
]
# ListGroups (api_key 16): list consumer groups known to the broker.
class ListGroupsResponse_v0(Response):
    API_KEY = 16
    API_VERSION = 0
    SCHEMA = Schema(
        ('error_code', Int16),
        ('groups', Array(
            ('group', String('utf-8')),
            ('protocol_type', String('utf-8'))))
    )


# v1 adds top-level throttle_time_ms.
class ListGroupsResponse_v1(Response):
    API_KEY = 16
    API_VERSION = 1
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('error_code', Int16),
        ('groups', Array(
            ('group', String('utf-8')),
            ('protocol_type', String('utf-8'))))
    )


class ListGroupsResponse_v2(Response):
    API_KEY = 16
    API_VERSION = 2
    SCHEMA = ListGroupsResponse_v1.SCHEMA  # wire format unchanged


class ListGroupsRequest_v0(Request):
    API_KEY = 16
    API_VERSION = 0
    RESPONSE_TYPE = ListGroupsResponse_v0
    SCHEMA = Schema()  # empty request body


class ListGroupsRequest_v1(Request):
    API_KEY = 16
    API_VERSION = 1
    RESPONSE_TYPE = ListGroupsResponse_v1
    SCHEMA = ListGroupsRequest_v0.SCHEMA


class ListGroupsRequest_v2(Request):
    API_KEY = 16
    # BUGFIX: API_VERSION was 1; the v2 request class must advertise
    # version 2 in its request header.
    API_VERSION = 2
    RESPONSE_TYPE = ListGroupsResponse_v2
    SCHEMA = ListGroupsRequest_v0.SCHEMA


# Version lists indexed by api_version.
ListGroupsRequest = [
    ListGroupsRequest_v0, ListGroupsRequest_v1,
    ListGroupsRequest_v2,
]
ListGroupsResponse = [
    ListGroupsResponse_v0, ListGroupsResponse_v1,
    ListGroupsResponse_v2,
]
# DescribeGroups (api_key 15): detailed state/membership of consumer groups.
class DescribeGroupsResponse_v0(Response):
    API_KEY = 15
    API_VERSION = 0
    SCHEMA = Schema(
        ('groups', Array(
            ('error_code', Int16),
            ('group', String('utf-8')),
            ('state', String('utf-8')),
            ('protocol_type', String('utf-8')),
            ('protocol', String('utf-8')),
            ('members', Array(
                ('member_id', String('utf-8')),
                ('client_id', String('utf-8')),
                ('client_host', String('utf-8')),
                ('member_metadata', Bytes),
                ('member_assignment', Bytes)))))
    )


# v1 adds top-level throttle_time_ms.
class DescribeGroupsResponse_v1(Response):
    API_KEY = 15
    API_VERSION = 1
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('groups', Array(
            ('error_code', Int16),
            ('group', String('utf-8')),
            ('state', String('utf-8')),
            ('protocol_type', String('utf-8')),
            ('protocol', String('utf-8')),
            ('members', Array(
                ('member_id', String('utf-8')),
                ('client_id', String('utf-8')),
                ('client_host', String('utf-8')),
                ('member_metadata', Bytes),
                ('member_assignment', Bytes)))))
    )


class DescribeGroupsResponse_v2(Response):
    API_KEY = 15
    API_VERSION = 2
    SCHEMA = DescribeGroupsResponse_v1.SCHEMA  # wire format unchanged


# v3 adds per-group authorized_operations.
class DescribeGroupsResponse_v3(Response):
    API_KEY = 15
    API_VERSION = 3
    # BUGFIX: a misplaced closing parenthesis nested 'authorized_operations'
    # inside the 'groups' field tuple instead of making it a top-level field.
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('groups', Array(
            ('error_code', Int16),
            ('group', String('utf-8')),
            ('state', String('utf-8')),
            ('protocol_type', String('utf-8')),
            ('protocol', String('utf-8')),
            ('members', Array(
                ('member_id', String('utf-8')),
                ('client_id', String('utf-8')),
                ('client_host', String('utf-8')),
                ('member_metadata', Bytes),
                ('member_assignment', Bytes))))),
        ('authorized_operations', Int32)
    )


class DescribeGroupsRequest_v0(Request):
    API_KEY = 15
    API_VERSION = 0
    RESPONSE_TYPE = DescribeGroupsResponse_v0
    SCHEMA = Schema(
        ('groups', Array(String('utf-8')))
    )


class DescribeGroupsRequest_v1(Request):
    API_KEY = 15
    API_VERSION = 1
    RESPONSE_TYPE = DescribeGroupsResponse_v1
    SCHEMA = DescribeGroupsRequest_v0.SCHEMA


class DescribeGroupsRequest_v2(Request):
    API_KEY = 15
    API_VERSION = 2
    RESPONSE_TYPE = DescribeGroupsResponse_v2
    SCHEMA = DescribeGroupsRequest_v0.SCHEMA


class DescribeGroupsRequest_v3(Request):
    API_KEY = 15
    API_VERSION = 3
    # BUGFIX: was DescribeGroupsResponse_v2; v3 responses carry the extra
    # authorized_operations field and need the v3 schema to decode.
    RESPONSE_TYPE = DescribeGroupsResponse_v3
    SCHEMA = Schema(
        ('groups', Array(String('utf-8'))),
        ('include_authorized_operations', Boolean)
    )


# Version lists indexed by api_version.
DescribeGroupsRequest = [
    DescribeGroupsRequest_v0, DescribeGroupsRequest_v1,
    DescribeGroupsRequest_v2, DescribeGroupsRequest_v3,
]
DescribeGroupsResponse = [
    DescribeGroupsResponse_v0, DescribeGroupsResponse_v1,
    DescribeGroupsResponse_v2, DescribeGroupsResponse_v3,
]
# SaslHandshake (api_key 17): negotiate the SASL mechanism before auth.
class SaslHandShakeResponse_v0(Response):
    API_KEY = 17
    API_VERSION = 0
    SCHEMA = Schema(
        ('error_code', Int16),
        ('enabled_mechanisms', Array(String('utf-8')))
    )


class SaslHandShakeResponse_v1(Response):
    API_KEY = 17
    API_VERSION = 1
    SCHEMA = SaslHandShakeResponse_v0.SCHEMA  # wire format unchanged


class SaslHandShakeRequest_v0(Request):
    API_KEY = 17
    API_VERSION = 0
    RESPONSE_TYPE = SaslHandShakeResponse_v0
    SCHEMA = Schema(
        ('mechanism', String('utf-8'))
    )


# v1 signals that subsequent auth bytes flow via SaslAuthenticate requests.
class SaslHandShakeRequest_v1(Request):
    API_KEY = 17
    API_VERSION = 1
    RESPONSE_TYPE = SaslHandShakeResponse_v1
    SCHEMA = SaslHandShakeRequest_v0.SCHEMA


# Version lists indexed by api_version.
SaslHandShakeRequest = [SaslHandShakeRequest_v0, SaslHandShakeRequest_v1]
SaslHandShakeResponse = [SaslHandShakeResponse_v0, SaslHandShakeResponse_v1]
# DescribeAcls (api_key 29): list ACLs matching a filter.
class DescribeAclsResponse_v0(Response):
    API_KEY = 29
    API_VERSION = 0
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('error_code', Int16),
        ('error_message', String('utf-8')),
        ('resources', Array(
            ('resource_type', Int8),
            ('resource_name', String('utf-8')),
            ('acls', Array(
                ('principal', String('utf-8')),
                ('host', String('utf-8')),
                ('operation', Int8),
                ('permission_type', Int8)))))
    )


# v1 adds resource_pattern_type (literal vs prefixed, KIP-290).
class DescribeAclsResponse_v1(Response):
    API_KEY = 29
    API_VERSION = 1
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('error_code', Int16),
        ('error_message', String('utf-8')),
        ('resources', Array(
            ('resource_type', Int8),
            ('resource_name', String('utf-8')),
            ('resource_pattern_type', Int8),
            ('acls', Array(
                ('principal', String('utf-8')),
                ('host', String('utf-8')),
                ('operation', Int8),
                ('permission_type', Int8)))))
    )


class DescribeAclsResponse_v2(Response):
    API_KEY = 29
    API_VERSION = 2
    SCHEMA = DescribeAclsResponse_v1.SCHEMA  # wire format unchanged


class DescribeAclsRequest_v0(Request):
    API_KEY = 29
    API_VERSION = 0
    RESPONSE_TYPE = DescribeAclsResponse_v0
    SCHEMA = Schema(
        ('resource_type', Int8),
        ('resource_name', String('utf-8')),
        ('principal', String('utf-8')),
        ('host', String('utf-8')),
        ('operation', Int8),
        ('permission_type', Int8)
    )


class DescribeAclsRequest_v1(Request):
    API_KEY = 29
    API_VERSION = 1
    RESPONSE_TYPE = DescribeAclsResponse_v1
    SCHEMA = Schema(
        ('resource_type', Int8),
        ('resource_name', String('utf-8')),
        ('resource_pattern_type_filter', Int8),
        ('principal', String('utf-8')),
        ('host', String('utf-8')),
        ('operation', Int8),
        ('permission_type', Int8)
    )


class DescribeAclsRequest_v2(Request):
    """
    Enable flexible version
    """
    API_KEY = 29
    API_VERSION = 2
    RESPONSE_TYPE = DescribeAclsResponse_v2
    SCHEMA = DescribeAclsRequest_v1.SCHEMA


# Version lists indexed by api_version.
# BUGFIX: the _v2 classes were defined but omitted from both lists, so
# api_version-indexed lookups could never select them.
DescribeAclsRequest = [
    DescribeAclsRequest_v0, DescribeAclsRequest_v1, DescribeAclsRequest_v2,
]
DescribeAclsResponse = [
    DescribeAclsResponse_v0, DescribeAclsResponse_v1, DescribeAclsResponse_v2,
]
# CreateAcls (api_key 30): create ACL bindings.
class CreateAclsResponse_v0(Response):
    API_KEY = 30
    API_VERSION = 0
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('creation_responses', Array(
            ('error_code', Int16),
            ('error_message', String('utf-8'))))
    )


class CreateAclsResponse_v1(Response):
    API_KEY = 30
    API_VERSION = 1
    SCHEMA = CreateAclsResponse_v0.SCHEMA  # wire format unchanged


class CreateAclsRequest_v0(Request):
    API_KEY = 30
    API_VERSION = 0
    RESPONSE_TYPE = CreateAclsResponse_v0
    SCHEMA = Schema(
        ('creations', Array(
            ('resource_type', Int8),
            ('resource_name', String('utf-8')),
            ('principal', String('utf-8')),
            ('host', String('utf-8')),
            ('operation', Int8),
            ('permission_type', Int8)))
    )


# v1 adds resource_pattern_type per creation (KIP-290).
class CreateAclsRequest_v1(Request):
    API_KEY = 30
    API_VERSION = 1
    RESPONSE_TYPE = CreateAclsResponse_v1
    SCHEMA = Schema(
        ('creations', Array(
            ('resource_type', Int8),
            ('resource_name', String('utf-8')),
            ('resource_pattern_type', Int8),
            ('principal', String('utf-8')),
            ('host', String('utf-8')),
            ('operation', Int8),
            ('permission_type', Int8)))
    )


# Version lists indexed by api_version.
CreateAclsRequest = [CreateAclsRequest_v0, CreateAclsRequest_v1]
CreateAclsResponse = [CreateAclsResponse_v0, CreateAclsResponse_v1]
# DeleteAcls (api_key 31): delete ACL bindings matching filters.
class DeleteAclsResponse_v0(Response):
    API_KEY = 31
    API_VERSION = 0
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('filter_responses', Array(
            ('error_code', Int16),
            ('error_message', String('utf-8')),
            ('matching_acls', Array(
                ('error_code', Int16),
                ('error_message', String('utf-8')),
                ('resource_type', Int8),
                ('resource_name', String('utf-8')),
                ('principal', String('utf-8')),
                ('host', String('utf-8')),
                ('operation', Int8),
                ('permission_type', Int8)))))
    )


# v1 adds resource_pattern_type per matching ACL (KIP-290).
class DeleteAclsResponse_v1(Response):
    API_KEY = 31
    API_VERSION = 1
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('filter_responses', Array(
            ('error_code', Int16),
            ('error_message', String('utf-8')),
            ('matching_acls', Array(
                ('error_code', Int16),
                ('error_message', String('utf-8')),
                ('resource_type', Int8),
                ('resource_name', String('utf-8')),
                ('resource_pattern_type', Int8),
                ('principal', String('utf-8')),
                ('host', String('utf-8')),
                ('operation', Int8),
                ('permission_type', Int8)))))
    )


class DeleteAclsRequest_v0(Request):
    API_KEY = 31
    API_VERSION = 0
    RESPONSE_TYPE = DeleteAclsResponse_v0
    SCHEMA = Schema(
        ('filters', Array(
            ('resource_type', Int8),
            ('resource_name', String('utf-8')),
            ('principal', String('utf-8')),
            ('host', String('utf-8')),
            ('operation', Int8),
            ('permission_type', Int8)))
    )


class DeleteAclsRequest_v1(Request):
    API_KEY = 31
    API_VERSION = 1
    RESPONSE_TYPE = DeleteAclsResponse_v1
    SCHEMA = Schema(
        ('filters', Array(
            ('resource_type', Int8),
            ('resource_name', String('utf-8')),
            ('resource_pattern_type_filter', Int8),
            ('principal', String('utf-8')),
            ('host', String('utf-8')),
            ('operation', Int8),
            ('permission_type', Int8)))
    )


# Version lists indexed by api_version.
DeleteAclsRequest = [DeleteAclsRequest_v0, DeleteAclsRequest_v1]
DeleteAclsResponse = [DeleteAclsResponse_v0, DeleteAclsResponse_v1]
# AlterConfigs (api_key 33): change broker/topic configuration entries.
class AlterConfigsResponse_v0(Response):
    API_KEY = 33
    API_VERSION = 0
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('resources', Array(
            ('error_code', Int16),
            ('error_message', String('utf-8')),
            ('resource_type', Int8),
            ('resource_name', String('utf-8'))))
    )


class AlterConfigsResponse_v1(Response):
    API_KEY = 33
    API_VERSION = 1
    SCHEMA = AlterConfigsResponse_v0.SCHEMA  # wire format unchanged


class AlterConfigsRequest_v0(Request):
    API_KEY = 33
    API_VERSION = 0
    RESPONSE_TYPE = AlterConfigsResponse_v0
    SCHEMA = Schema(
        ('resources', Array(
            ('resource_type', Int8),
            ('resource_name', String('utf-8')),
            ('config_entries', Array(
                ('config_name', String('utf-8')),
                ('config_value', String('utf-8')))))),
        ('validate_only', Boolean)
    )


class AlterConfigsRequest_v1(Request):
    API_KEY = 33
    API_VERSION = 1
    RESPONSE_TYPE = AlterConfigsResponse_v1
    SCHEMA = AlterConfigsRequest_v0.SCHEMA


# Version lists indexed by api_version.
AlterConfigsRequest = [AlterConfigsRequest_v0, AlterConfigsRequest_v1]
# BUGFIX: the response list wrongly contained AlterConfigsRequest_v1, so
# decoding a v1 response would select a request class.
AlterConfigsResponse = [AlterConfigsResponse_v0, AlterConfigsResponse_v1]
# DescribeConfigs (api_key 32): read broker/topic configuration entries.
# NOTE(review): the per-entry field is named 'config_names' (plural) though it
# holds a single name — kept as-is since decoded attribute names are part of
# the public surface; confirm against callers before renaming.
class DescribeConfigsResponse_v0(Response):
    API_KEY = 32
    API_VERSION = 0
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('resources', Array(
            ('error_code', Int16),
            ('error_message', String('utf-8')),
            ('resource_type', Int8),
            ('resource_name', String('utf-8')),
            ('config_entries', Array(
                ('config_names', String('utf-8')),
                ('config_value', String('utf-8')),
                ('read_only', Boolean),
                ('is_default', Boolean),
                ('is_sensitive', Boolean)))))
    )


# v1 adds config_synonyms per entry.
class DescribeConfigsResponse_v1(Response):
    API_KEY = 32
    API_VERSION = 1
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('resources', Array(
            ('error_code', Int16),
            ('error_message', String('utf-8')),
            ('resource_type', Int8),
            ('resource_name', String('utf-8')),
            ('config_entries', Array(
                ('config_names', String('utf-8')),
                ('config_value', String('utf-8')),
                ('read_only', Boolean),
                ('is_default', Boolean),
                ('is_sensitive', Boolean),
                ('config_synonyms', Array(
                    ('config_name', String('utf-8')),
                    ('config_value', String('utf-8')),
                    ('config_source', Int8)))))))
    )


# v2 replaces is_default with config_source.
class DescribeConfigsResponse_v2(Response):
    API_KEY = 32
    API_VERSION = 2
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('resources', Array(
            ('error_code', Int16),
            ('error_message', String('utf-8')),
            ('resource_type', Int8),
            ('resource_name', String('utf-8')),
            ('config_entries', Array(
                ('config_names', String('utf-8')),
                ('config_value', String('utf-8')),
                ('read_only', Boolean),
                ('config_source', Int8),
                ('is_sensitive', Boolean),
                ('config_synonyms', Array(
                    ('config_name', String('utf-8')),
                    ('config_value', String('utf-8')),
                    ('config_source', Int8)))))))
    )


class DescribeConfigsRequest_v0(Request):
    API_KEY = 32
    API_VERSION = 0
    RESPONSE_TYPE = DescribeConfigsResponse_v0
    SCHEMA = Schema(
        ('resources', Array(
            ('resource_type', Int8),
            ('resource_name', String('utf-8')),
            ('config_names', Array(String('utf-8')))))
    )


# v1 adds the include_synonyms flag.
class DescribeConfigsRequest_v1(Request):
    API_KEY = 32
    API_VERSION = 1
    RESPONSE_TYPE = DescribeConfigsResponse_v1
    SCHEMA = Schema(
        ('resources', Array(
            ('resource_type', Int8),
            ('resource_name', String('utf-8')),
            ('config_names', Array(String('utf-8'))))),
        ('include_synonyms', Boolean)
    )


class DescribeConfigsRequest_v2(Request):
    API_KEY = 32
    API_VERSION = 2
    RESPONSE_TYPE = DescribeConfigsResponse_v2
    SCHEMA = DescribeConfigsRequest_v1.SCHEMA


# Version lists indexed by api_version.
DescribeConfigsRequest = [
    DescribeConfigsRequest_v0, DescribeConfigsRequest_v1,
    DescribeConfigsRequest_v2,
]
DescribeConfigsResponse = [
    DescribeConfigsResponse_v0, DescribeConfigsResponse_v1,
    DescribeConfigsResponse_v2,
]
# SaslAuthenticate (api_key 36): carry SASL auth bytes after the handshake.
class SaslAuthenticateResponse_v0(Response):
    API_KEY = 36
    API_VERSION = 0
    SCHEMA = Schema(
        ('error_code', Int16),
        ('error_message', String('utf-8')),
        ('sasl_auth_bytes', Bytes)
    )


# v1 adds session_lifetime_ms (SASL re-authentication, KIP-368).
class SaslAuthenticateResponse_v1(Response):
    API_KEY = 36
    API_VERSION = 1
    SCHEMA = Schema(
        ('error_code', Int16),
        ('error_message', String('utf-8')),
        ('sasl_auth_bytes', Bytes),
        ('session_lifetime_ms', Int64)
    )


class SaslAuthenticateRequest_v0(Request):
    API_KEY = 36
    API_VERSION = 0
    RESPONSE_TYPE = SaslAuthenticateResponse_v0
    SCHEMA = Schema(
        ('sasl_auth_bytes', Bytes)
    )


class SaslAuthenticateRequest_v1(Request):
    API_KEY = 36
    API_VERSION = 1
    RESPONSE_TYPE = SaslAuthenticateResponse_v1
    SCHEMA = SaslAuthenticateRequest_v0.SCHEMA


# Version lists indexed by api_version.
SaslAuthenticateRequest = [
    SaslAuthenticateRequest_v0, SaslAuthenticateRequest_v1,
]
SaslAuthenticateResponse = [
    SaslAuthenticateResponse_v0, SaslAuthenticateResponse_v1,
]
# CreatePartitions (api_key 37): add partitions to existing topics.
class CreatePartitionsResponse_v0(Response):
    API_KEY = 37
    API_VERSION = 0
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('topic_errors', Array(
            ('topic', String('utf-8')),
            ('error_code', Int16),
            ('error_message', String('utf-8'))))
    )


class CreatePartitionsResponse_v1(Response):
    API_KEY = 37
    API_VERSION = 1
    SCHEMA = CreatePartitionsResponse_v0.SCHEMA  # wire format unchanged


class CreatePartitionsRequest_v0(Request):
    API_KEY = 37
    API_VERSION = 0
    RESPONSE_TYPE = CreatePartitionsResponse_v0
    SCHEMA = Schema(
        ('topic_partitions', Array(
            ('topic', String('utf-8')),
            ('new_partitions', Schema(
                ('count', Int32),
                # Broker assignment per new partition: list of replica ids.
                ('assignment', Array(Array(Int32))))))),
        ('timeout', Int32),
        ('validate_only', Boolean)
    )


class CreatePartitionsRequest_v1(Request):
    API_KEY = 37
    API_VERSION = 1
    SCHEMA = CreatePartitionsRequest_v0.SCHEMA
    RESPONSE_TYPE = CreatePartitionsResponse_v1


# Version lists indexed by api_version.
CreatePartitionsRequest = [
    CreatePartitionsRequest_v0, CreatePartitionsRequest_v1,
]
CreatePartitionsResponse = [
    CreatePartitionsResponse_v0, CreatePartitionsResponse_v1,
]
# DeleteGroups (api_key 42): delete consumer groups.
class DeleteGroupsResponse_v0(Response):
    API_KEY = 42
    API_VERSION = 0
    SCHEMA = Schema(
        ("throttle_time_ms", Int32),
        ("results", Array(
            ("group_id", String("utf-8")),
            ("error_code", Int16)))
    )


class DeleteGroupsResponse_v1(Response):
    API_KEY = 42
    API_VERSION = 1
    SCHEMA = DeleteGroupsResponse_v0.SCHEMA  # wire format unchanged


class DeleteGroupsRequest_v0(Request):
    API_KEY = 42
    API_VERSION = 0
    RESPONSE_TYPE = DeleteGroupsResponse_v0
    SCHEMA = Schema(
        # NOTE(review): field name 'groups_names' reads like a typo for
        # 'group_names' but is part of the decoded surface — confirm before
        # renaming.
        ("groups_names", Array(String("utf-8")))
    )


class DeleteGroupsRequest_v1(Request):
    API_KEY = 42
    API_VERSION = 1
    RESPONSE_TYPE = DeleteGroupsResponse_v1
    SCHEMA = DeleteGroupsRequest_v0.SCHEMA


# Version lists indexed by api_version.
DeleteGroupsRequest = [
    DeleteGroupsRequest_v0, DeleteGroupsRequest_v1
]
DeleteGroupsResponse = [
    DeleteGroupsResponse_v0, DeleteGroupsResponse_v1
]

View File

@@ -0,0 +1,97 @@
from __future__ import absolute_import
import abc
from kafka.protocol.struct import Struct
from kafka.protocol.types import Int16, Int32, String, Schema, Array
class RequestHeader(Struct):
    """Standard header prepended to every Kafka request on the wire.

    Fields are populated from the request class's API_KEY/API_VERSION plus
    the caller-supplied correlation id and client id.
    """
    SCHEMA = Schema(
        ('api_key', Int16),
        ('api_version', Int16),
        ('correlation_id', Int32),
        ('client_id', String('utf-8'))
    )

    def __init__(self, request, correlation_id=0, client_id='kafka-python'):
        # Positional args must match SCHEMA field order.
        super(RequestHeader, self).__init__(
            request.API_KEY, request.API_VERSION, correlation_id, client_id
        )
class Request(Struct):
    """Abstract base for all Kafka protocol request messages.

    Concrete subclasses define the abstract class attributes below;
    encoding/decoding is inherited from Struct.
    """
    __metaclass__ = abc.ABCMeta  # py2-style ABC declaration

    @abc.abstractproperty
    def API_KEY(self):
        """Integer identifier for api request"""
        pass

    @abc.abstractproperty
    def API_VERSION(self):
        """Integer of api request version"""
        pass

    @abc.abstractproperty
    def SCHEMA(self):
        """An instance of Schema() representing the request structure"""
        pass

    @abc.abstractproperty
    def RESPONSE_TYPE(self):
        """The Response class associated with the api request"""
        pass

    def expect_response(self):
        """Override this method if an api request does not always generate a response"""
        return True

    def to_object(self):
        # Plain-dict view keyed by schema field names (see _to_object).
        return _to_object(self.SCHEMA, self)
class Response(Struct):
    """Abstract base for all Kafka protocol response messages."""
    __metaclass__ = abc.ABCMeta  # py2-style ABC declaration

    @abc.abstractproperty
    def API_KEY(self):
        """Integer identifier for api request/response"""
        pass

    @abc.abstractproperty
    def API_VERSION(self):
        """Integer of api request/response version"""
        pass

    @abc.abstractproperty
    def SCHEMA(self):
        """An instance of Schema() representing the response structure"""
        pass

    def to_object(self):
        # Plain-dict view keyed by schema field names (see _to_object).
        return _to_object(self.SCHEMA, self)
def _to_object(schema, data):
    """Recursively convert *data* (a Struct or a positional sequence) into a
    plain dict keyed by the schema's field names.

    Nested Schemas recurse; Arrays of Schemas/Arrays map each element;
    everything else is copied through unchanged.
    """
    result = {}
    for position, (field_name, field_type) in enumerate(
            zip(schema.names, schema.fields)):
        # Structs expose named access; raw decoded tuples are positional.
        if isinstance(data, Struct):
            value = data.get_item(field_name)
        else:
            value = data[position]

        if isinstance(field_type, Schema):
            value = _to_object(field_type, value)
        elif (isinstance(field_type, Array)
                and isinstance(field_type.array_of, (Array, Schema))):
            value = [_to_object(field_type.array_of, item) for item in value]
        result[field_name] = value
    return result

View File

@@ -0,0 +1,255 @@
from __future__ import absolute_import
from kafka.protocol.api import Request, Response
from kafka.protocol.types import Array, Int8, Int16, Int32, Int64, Schema, String
# OffsetCommit (api_key 8): commit consumer group offsets.
class OffsetCommitResponse_v0(Response):
    API_KEY = 8
    API_VERSION = 0
    SCHEMA = Schema(
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('error_code', Int16)))))
    )


class OffsetCommitResponse_v1(Response):
    API_KEY = 8
    API_VERSION = 1
    SCHEMA = OffsetCommitResponse_v0.SCHEMA  # wire format unchanged


class OffsetCommitResponse_v2(Response):
    API_KEY = 8
    API_VERSION = 2
    SCHEMA = OffsetCommitResponse_v1.SCHEMA


# v3 adds top-level throttle_time_ms.
class OffsetCommitResponse_v3(Response):
    API_KEY = 8
    API_VERSION = 3
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('error_code', Int16)))))
    )


class OffsetCommitRequest_v0(Request):
    API_KEY = 8
    API_VERSION = 0  # Zookeeper-backed storage
    RESPONSE_TYPE = OffsetCommitResponse_v0
    SCHEMA = Schema(
        ('consumer_group', String('utf-8')),
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('offset', Int64),
                ('metadata', String('utf-8'))))))
    )


class OffsetCommitRequest_v1(Request):
    API_KEY = 8
    API_VERSION = 1  # Kafka-backed storage
    RESPONSE_TYPE = OffsetCommitResponse_v1
    SCHEMA = Schema(
        ('consumer_group', String('utf-8')),
        ('consumer_group_generation_id', Int32),
        ('consumer_id', String('utf-8')),
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('offset', Int64),
                ('timestamp', Int64),
                ('metadata', String('utf-8'))))))
    )


class OffsetCommitRequest_v2(Request):
    API_KEY = 8
    API_VERSION = 2  # added retention_time, dropped timestamp
    RESPONSE_TYPE = OffsetCommitResponse_v2
    SCHEMA = Schema(
        ('consumer_group', String('utf-8')),
        ('consumer_group_generation_id', Int32),
        ('consumer_id', String('utf-8')),
        ('retention_time', Int64),
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('offset', Int64),
                ('metadata', String('utf-8'))))))
    )
    # Sentinels: -1 means "no generation" / "use broker default retention".
    # NOTE(review): indentation was lost in this capture; these are placed as
    # class attributes of OffsetCommitRequest_v2 — confirm against upstream.
    DEFAULT_GENERATION_ID = -1
    DEFAULT_RETENTION_TIME = -1


class OffsetCommitRequest_v3(Request):
    API_KEY = 8
    API_VERSION = 3
    RESPONSE_TYPE = OffsetCommitResponse_v3
    SCHEMA = OffsetCommitRequest_v2.SCHEMA


# Version lists indexed by api_version.
OffsetCommitRequest = [
    OffsetCommitRequest_v0, OffsetCommitRequest_v1,
    OffsetCommitRequest_v2, OffsetCommitRequest_v3
]
OffsetCommitResponse = [
    OffsetCommitResponse_v0, OffsetCommitResponse_v1,
    OffsetCommitResponse_v2, OffsetCommitResponse_v3
]
# OffsetFetch (api_key 9): fetch committed consumer group offsets.
class OffsetFetchResponse_v0(Response):
    API_KEY = 9
    API_VERSION = 0
    SCHEMA = Schema(
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('offset', Int64),
                ('metadata', String('utf-8')),
                ('error_code', Int16)))))
    )


class OffsetFetchResponse_v1(Response):
    API_KEY = 9
    API_VERSION = 1
    SCHEMA = OffsetFetchResponse_v0.SCHEMA  # wire format unchanged


class OffsetFetchResponse_v2(Response):
    # Added in KIP-88
    API_KEY = 9
    API_VERSION = 2
    SCHEMA = Schema(
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('offset', Int64),
                ('metadata', String('utf-8')),
                ('error_code', Int16))))),
        # Top-level error_code (group-level errors), added by KIP-88.
        ('error_code', Int16)
    )


# v3 adds top-level throttle_time_ms.
class OffsetFetchResponse_v3(Response):
    API_KEY = 9
    API_VERSION = 3
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('offset', Int64),
                ('metadata', String('utf-8')),
                ('error_code', Int16))))),
        ('error_code', Int16)
    )


class OffsetFetchRequest_v0(Request):
    API_KEY = 9
    API_VERSION = 0  # zookeeper-backed storage
    RESPONSE_TYPE = OffsetFetchResponse_v0
    SCHEMA = Schema(
        ('consumer_group', String('utf-8')),
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(Int32))))
    )


class OffsetFetchRequest_v1(Request):
    API_KEY = 9
    API_VERSION = 1  # kafka-backed storage
    RESPONSE_TYPE = OffsetFetchResponse_v1
    SCHEMA = OffsetFetchRequest_v0.SCHEMA


class OffsetFetchRequest_v2(Request):
    # KIP-88: Allows passing null topics to return offsets for all partitions
    # that the consumer group has a stored offset for, even if no consumer in
    # the group is currently consuming that partition.
    API_KEY = 9
    API_VERSION = 2
    RESPONSE_TYPE = OffsetFetchResponse_v2
    SCHEMA = OffsetFetchRequest_v1.SCHEMA


class OffsetFetchRequest_v3(Request):
    API_KEY = 9
    API_VERSION = 3
    RESPONSE_TYPE = OffsetFetchResponse_v3
    SCHEMA = OffsetFetchRequest_v2.SCHEMA


# Version lists indexed by api_version.
OffsetFetchRequest = [
    OffsetFetchRequest_v0, OffsetFetchRequest_v1,
    OffsetFetchRequest_v2, OffsetFetchRequest_v3,
]
OffsetFetchResponse = [
    OffsetFetchResponse_v0, OffsetFetchResponse_v1,
    OffsetFetchResponse_v2, OffsetFetchResponse_v3,
]
# GroupCoordinator / FindCoordinator (api_key 10): locate the broker that
# coordinates a consumer group (or, in v1, a transaction).
class GroupCoordinatorResponse_v0(Response):
    API_KEY = 10
    API_VERSION = 0
    SCHEMA = Schema(
        ('error_code', Int16),
        ('coordinator_id', Int32),
        ('host', String('utf-8')),
        ('port', Int32)
    )


# v1 adds error_message.
class GroupCoordinatorResponse_v1(Response):
    API_KEY = 10
    API_VERSION = 1
    SCHEMA = Schema(
        ('error_code', Int16),
        ('error_message', String('utf-8')),
        ('coordinator_id', Int32),
        ('host', String('utf-8')),
        ('port', Int32)
    )


class GroupCoordinatorRequest_v0(Request):
    API_KEY = 10
    API_VERSION = 0
    RESPONSE_TYPE = GroupCoordinatorResponse_v0
    SCHEMA = Schema(
        ('consumer_group', String('utf-8'))
    )


# v1 generalizes the key: coordinator_type selects group vs transaction
# coordinator lookup.
class GroupCoordinatorRequest_v1(Request):
    API_KEY = 10
    API_VERSION = 1
    RESPONSE_TYPE = GroupCoordinatorResponse_v1
    SCHEMA = Schema(
        ('coordinator_key', String('utf-8')),
        ('coordinator_type', Int8)
    )


# Version lists indexed by api_version.
GroupCoordinatorRequest = [GroupCoordinatorRequest_v0, GroupCoordinatorRequest_v1]
GroupCoordinatorResponse = [GroupCoordinatorResponse_v0, GroupCoordinatorResponse_v1]

View File

@@ -0,0 +1,386 @@
from __future__ import absolute_import
from kafka.protocol.api import Request, Response
from kafka.protocol.types import Array, Int8, Int16, Int32, Int64, Schema, String, Bytes
# Fetch (api_key 1) responses.
# NOTE(review): the inner field holding the topic name is spelled 'topics'
# (same as the outer array) rather than 'topic'; decoded attribute names are
# part of the public surface, so it is kept as-is — confirm before renaming.
class FetchResponse_v0(Response):
    API_KEY = 1
    API_VERSION = 0
    SCHEMA = Schema(
        ('topics', Array(
            ('topics', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('error_code', Int16),
                ('highwater_offset', Int64),
                ('message_set', Bytes)))))
    )


# v1 adds top-level throttle_time_ms.
class FetchResponse_v1(Response):
    API_KEY = 1
    API_VERSION = 1
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('topics', Array(
            ('topics', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('error_code', Int16),
                ('highwater_offset', Int64),
                ('message_set', Bytes)))))
    )


class FetchResponse_v2(Response):
    API_KEY = 1
    API_VERSION = 2
    SCHEMA = FetchResponse_v1.SCHEMA # message format changed internally


class FetchResponse_v3(Response):
    API_KEY = 1
    API_VERSION = 3
    SCHEMA = FetchResponse_v2.SCHEMA


# v4 adds last_stable_offset and aborted_transactions (transactions).
class FetchResponse_v4(Response):
    API_KEY = 1
    API_VERSION = 4
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('topics', Array(
            ('topics', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('error_code', Int16),
                ('highwater_offset', Int64),
                ('last_stable_offset', Int64),
                ('aborted_transactions', Array(
                    ('producer_id', Int64),
                    ('first_offset', Int64))),
                ('message_set', Bytes)))))
    )


# v5 adds log_start_offset per partition.
class FetchResponse_v5(Response):
    API_KEY = 1
    API_VERSION = 5
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('topics', Array(
            ('topics', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('error_code', Int16),
                ('highwater_offset', Int64),
                ('last_stable_offset', Int64),
                ('log_start_offset', Int64),
                ('aborted_transactions', Array(
                    ('producer_id', Int64),
                    ('first_offset', Int64))),
                ('message_set', Bytes)))))
    )


class FetchResponse_v6(Response):
    """
    Same as FetchResponse_v5. The version number is bumped up to indicate that the client supports KafkaStorageException.
    The KafkaStorageException will be translated to NotLeaderForPartitionException in the response if version <= 5
    """
    API_KEY = 1
    API_VERSION = 6
    SCHEMA = FetchResponse_v5.SCHEMA


class FetchResponse_v7(Response):
    """
    Add error_code and session_id to response
    """
    API_KEY = 1
    API_VERSION = 7
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('error_code', Int16),
        ('session_id', Int32),
        ('topics', Array(
            ('topics', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('error_code', Int16),
                ('highwater_offset', Int64),
                ('last_stable_offset', Int64),
                ('log_start_offset', Int64),
                ('aborted_transactions', Array(
                    ('producer_id', Int64),
                    ('first_offset', Int64))),
                ('message_set', Bytes)))))
    )


class FetchResponse_v8(Response):
    API_KEY = 1
    API_VERSION = 8
    SCHEMA = FetchResponse_v7.SCHEMA  # wire format unchanged


class FetchResponse_v9(Response):
    API_KEY = 1
    API_VERSION = 9
    SCHEMA = FetchResponse_v7.SCHEMA


class FetchResponse_v10(Response):
    API_KEY = 1
    API_VERSION = 10
    SCHEMA = FetchResponse_v7.SCHEMA


# v11 adds preferred_read_replica (fetch from follower, KIP-392).
class FetchResponse_v11(Response):
    API_KEY = 1
    API_VERSION = 11
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('error_code', Int16),
        ('session_id', Int32),
        ('topics', Array(
            ('topics', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('error_code', Int16),
                ('highwater_offset', Int64),
                ('last_stable_offset', Int64),
                ('log_start_offset', Int64),
                ('aborted_transactions', Array(
                    ('producer_id', Int64),
                    ('first_offset', Int64))),
                ('preferred_read_replica', Int32),
                ('message_set', Bytes)))))
    )
class FetchRequest_v0(Request):
    """Fetch messages for a set of topic-partitions starting at given offsets."""
    API_KEY = 1
    API_VERSION = 0
    RESPONSE_TYPE = FetchResponse_v0
    SCHEMA = Schema(
        ('replica_id', Int32),
        ('max_wait_time', Int32),
        ('min_bytes', Int32),
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('offset', Int64),
                ('max_bytes', Int32)))))
    )


class FetchRequest_v1(Request):
    """Wire-identical to v0; response gains throttle_time_ms."""
    API_KEY = 1
    API_VERSION = 1
    RESPONSE_TYPE = FetchResponse_v1
    SCHEMA = FetchRequest_v0.SCHEMA


class FetchRequest_v2(Request):
    """Wire-identical to v1; bump signals message-format v1 support."""
    API_KEY = 1
    API_VERSION = 2
    RESPONSE_TYPE = FetchResponse_v2
    SCHEMA = FetchRequest_v1.SCHEMA


class FetchRequest_v3(Request):
    """v3 adds a request-level max_bytes cap across all partitions."""
    API_KEY = 1
    API_VERSION = 3
    RESPONSE_TYPE = FetchResponse_v3
    SCHEMA = Schema(
        ('replica_id', Int32),
        ('max_wait_time', Int32),
        ('min_bytes', Int32),
        ('max_bytes', Int32),  # This new field is only difference from FR_v2
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('offset', Int64),
                ('max_bytes', Int32)))))
    )


class FetchRequest_v4(Request):
    # Adds isolation_level field (read_committed vs read_uncommitted)
    API_KEY = 1
    API_VERSION = 4
    RESPONSE_TYPE = FetchResponse_v4
    SCHEMA = Schema(
        ('replica_id', Int32),
        ('max_wait_time', Int32),
        ('min_bytes', Int32),
        ('max_bytes', Int32),
        ('isolation_level', Int8),
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('offset', Int64),
                ('max_bytes', Int32)))))
    )


class FetchRequest_v5(Request):
    # This may only be used in broker-broker api calls
    # (adds per-partition log_start_offset, renames offset -> fetch_offset)
    API_KEY = 1
    API_VERSION = 5
    RESPONSE_TYPE = FetchResponse_v5
    SCHEMA = Schema(
        ('replica_id', Int32),
        ('max_wait_time', Int32),
        ('min_bytes', Int32),
        ('max_bytes', Int32),
        ('isolation_level', Int8),
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('fetch_offset', Int64),
                ('log_start_offset', Int64),
                ('max_bytes', Int32)))))
    )


class FetchRequest_v6(Request):
    """
    The body of FETCH_REQUEST_V6 is the same as FETCH_REQUEST_V5.
    The version number is bumped up to indicate that the client supports KafkaStorageException.
    The KafkaStorageException will be translated to NotLeaderForPartitionException in the response if version <= 5
    """
    API_KEY = 1
    API_VERSION = 6
    RESPONSE_TYPE = FetchResponse_v6
    SCHEMA = FetchRequest_v5.SCHEMA
class FetchRequest_v7(Request):
    """
    Add incremental fetch requests (KIP-227).

    Adds session_id/session_epoch plus forgotten_topics_data so a client can
    maintain an incremental fetch session with the broker.
    """
    API_KEY = 1
    API_VERSION = 7
    RESPONSE_TYPE = FetchResponse_v7
    SCHEMA = Schema(
        ('replica_id', Int32),
        ('max_wait_time', Int32),
        ('min_bytes', Int32),
        ('max_bytes', Int32),
        ('isolation_level', Int8),
        ('session_id', Int32),
        ('session_epoch', Int32),
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('fetch_offset', Int64),
                ('log_start_offset', Int64),
                ('max_bytes', Int32))))),
        ('forgotten_topics_data', Array(
            # Fixed: was the bare `String` class; an un-instantiated String
            # has no encoding bound, so it could not encode/decode a topic
            # name (unnoticed only because clients send this array empty).
            ('topic', String('utf-8')),
            ('partitions', Array(Int32))
        )),
    )
class FetchRequest_v8(Request):
    """
    bump used to indicate that on quota violation brokers send out responses before throttling.
    """
    API_KEY = 1
    API_VERSION = 8
    RESPONSE_TYPE = FetchResponse_v8
    SCHEMA = FetchRequest_v7.SCHEMA
class FetchRequest_v9(Request):
    """
    adds the current leader epoch (see KIP-320)
    """
    API_KEY = 1
    API_VERSION = 9
    RESPONSE_TYPE = FetchResponse_v9
    SCHEMA = Schema(
        ('replica_id', Int32),
        ('max_wait_time', Int32),
        ('min_bytes', Int32),
        ('max_bytes', Int32),
        ('isolation_level', Int8),
        ('session_id', Int32),
        ('session_epoch', Int32),
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('current_leader_epoch', Int32),
                ('fetch_offset', Int64),
                ('log_start_offset', Int64),
                ('max_bytes', Int32))))),
        ('forgotten_topics_data', Array(
            # Fixed: was the bare `String` class; an un-instantiated String
            # has no encoding bound and cannot serialize a topic name.
            ('topic', String('utf-8')),
            ('partitions', Array(Int32)),
        )),
    )
class FetchRequest_v10(Request):
    """
    bumped up to indicate ZStandard capability. (see KIP-110)
    """
    API_KEY = 1
    API_VERSION = 10
    RESPONSE_TYPE = FetchResponse_v10
    SCHEMA = FetchRequest_v9.SCHEMA
class FetchRequest_v11(Request):
    """
    added rack ID to support read from followers (KIP-392)
    """
    API_KEY = 1
    API_VERSION = 11
    RESPONSE_TYPE = FetchResponse_v11
    SCHEMA = Schema(
        ('replica_id', Int32),
        ('max_wait_time', Int32),
        ('min_bytes', Int32),
        ('max_bytes', Int32),
        ('isolation_level', Int8),
        ('session_id', Int32),
        ('session_epoch', Int32),
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('current_leader_epoch', Int32),
                ('fetch_offset', Int64),
                ('log_start_offset', Int64),
                ('max_bytes', Int32))))),
        ('forgotten_topics_data', Array(
            # Fixed: was the bare `String` class; an un-instantiated String
            # has no encoding bound and cannot serialize a topic name.
            ('topic', String('utf-8')),
            ('partitions', Array(Int32))
        )),
        ('rack_id', String('utf-8')),
    )
# Version registries: FetchRequest[n] / FetchResponse[n] is api_version n.
FetchRequest = [
    FetchRequest_v0, FetchRequest_v1, FetchRequest_v2,
    FetchRequest_v3, FetchRequest_v4, FetchRequest_v5,
    FetchRequest_v6, FetchRequest_v7, FetchRequest_v8,
    FetchRequest_v9, FetchRequest_v10, FetchRequest_v11,
]

FetchResponse = [
    FetchResponse_v0, FetchResponse_v1, FetchResponse_v2,
    FetchResponse_v3, FetchResponse_v4, FetchResponse_v5,
    FetchResponse_v6, FetchResponse_v7, FetchResponse_v8,
    FetchResponse_v9, FetchResponse_v10, FetchResponse_v11,
]

View File

@@ -0,0 +1,30 @@
class KafkaBytes(bytearray):
    """A byte buffer with a file-like read/write cursor.

    Subclassing bytearray keeps the raw storage directly addressable while
    read(), write(), seek() and tell() provide the io-style interface the
    protocol parser expects.
    """

    def __init__(self, size):
        super(KafkaBytes, self).__init__(size)
        self._pos = 0

    def read(self, nbytes=None):
        """Return up to nbytes from the cursor (remainder of buffer if None)."""
        if nbytes is None:
            nbytes = len(self) - self._pos
        start = self._pos
        end = min(start + nbytes, len(self))
        self._pos = end
        return bytes(self[start:end])

    def write(self, data):
        """Copy data into the buffer at the cursor, growing the buffer if needed."""
        begin = self._pos
        self._pos = begin + len(data)
        self[begin:self._pos] = data

    def seek(self, idx):
        """Move the cursor to absolute position idx."""
        self._pos = idx

    def tell(self):
        """Return the current cursor position."""
        return self._pos

    def __str__(self):
        return 'KafkaBytes(%d)' % len(self)

    def __repr__(self):
        return str(self)

View File

@@ -0,0 +1,230 @@
from __future__ import absolute_import
from kafka.protocol.api import Request, Response
from kafka.protocol.struct import Struct
from kafka.protocol.types import Array, Bytes, Int16, Int32, Schema, String
class JoinGroupResponse_v0(Response):
    """Coordinator's reply: generation, chosen protocol, leader and member roster."""
    API_KEY = 11
    API_VERSION = 0
    SCHEMA = Schema(
        ('error_code', Int16),
        ('generation_id', Int32),
        ('group_protocol', String('utf-8')),
        ('leader_id', String('utf-8')),
        ('member_id', String('utf-8')),
        ('members', Array(
            ('member_id', String('utf-8')),
            ('member_metadata', Bytes)))
    )


class JoinGroupResponse_v1(Response):
    """Wire-identical to v0; paired with rebalance_timeout added in the v1 request."""
    API_KEY = 11
    API_VERSION = 1
    SCHEMA = JoinGroupResponse_v0.SCHEMA


class JoinGroupResponse_v2(Response):
    """v2 prepends throttle_time_ms."""
    API_KEY = 11
    API_VERSION = 2
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('error_code', Int16),
        ('generation_id', Int32),
        ('group_protocol', String('utf-8')),
        ('leader_id', String('utf-8')),
        ('member_id', String('utf-8')),
        ('members', Array(
            ('member_id', String('utf-8')),
            ('member_metadata', Bytes)))
    )


class JoinGroupRequest_v0(Request):
    """Ask the group coordinator to add this member to the group."""
    API_KEY = 11
    API_VERSION = 0
    RESPONSE_TYPE = JoinGroupResponse_v0
    SCHEMA = Schema(
        ('group', String('utf-8')),
        ('session_timeout', Int32),
        ('member_id', String('utf-8')),
        ('protocol_type', String('utf-8')),
        ('group_protocols', Array(
            ('protocol_name', String('utf-8')),
            ('protocol_metadata', Bytes)))
    )
    UNKNOWN_MEMBER_ID = ''  # sent on first join, before the coordinator assigns an id


class JoinGroupRequest_v1(Request):
    """v1 adds rebalance_timeout, decoupling it from session_timeout."""
    API_KEY = 11
    API_VERSION = 1
    RESPONSE_TYPE = JoinGroupResponse_v1
    SCHEMA = Schema(
        ('group', String('utf-8')),
        ('session_timeout', Int32),
        ('rebalance_timeout', Int32),
        ('member_id', String('utf-8')),
        ('protocol_type', String('utf-8')),
        ('group_protocols', Array(
            ('protocol_name', String('utf-8')),
            ('protocol_metadata', Bytes)))
    )
    UNKNOWN_MEMBER_ID = ''


class JoinGroupRequest_v2(Request):
    """Wire-identical to v1; response gains throttle_time_ms."""
    API_KEY = 11
    API_VERSION = 2
    RESPONSE_TYPE = JoinGroupResponse_v2
    SCHEMA = JoinGroupRequest_v1.SCHEMA
    UNKNOWN_MEMBER_ID = ''


# Version registries indexed by api_version.
JoinGroupRequest = [
    JoinGroupRequest_v0, JoinGroupRequest_v1, JoinGroupRequest_v2
]

JoinGroupResponse = [
    JoinGroupResponse_v0, JoinGroupResponse_v1, JoinGroupResponse_v2
]
class ProtocolMetadata(Struct):
    """Consumer-protocol metadata blob carried in JoinGroup member_metadata."""
    SCHEMA = Schema(
        ('version', Int16),
        ('subscription', Array(String('utf-8'))), # topics list
        ('user_data', Bytes)
    )


class SyncGroupResponse_v0(Response):
    """Coordinator's reply carrying this member's opaque assignment blob."""
    API_KEY = 14
    API_VERSION = 0
    SCHEMA = Schema(
        ('error_code', Int16),
        ('member_assignment', Bytes)
    )


class SyncGroupResponse_v1(Response):
    """v1 prepends throttle_time_ms."""
    API_KEY = 14
    API_VERSION = 1
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('error_code', Int16),
        ('member_assignment', Bytes)
    )


class SyncGroupRequest_v0(Request):
    """Leader distributes per-member assignments; followers send an empty list."""
    API_KEY = 14
    API_VERSION = 0
    RESPONSE_TYPE = SyncGroupResponse_v0
    SCHEMA = Schema(
        ('group', String('utf-8')),
        ('generation_id', Int32),
        ('member_id', String('utf-8')),
        ('group_assignment', Array(
            ('member_id', String('utf-8')),
            ('member_metadata', Bytes)))
    )


class SyncGroupRequest_v1(Request):
    """Wire-identical to v0; response gains throttle_time_ms."""
    API_KEY = 14
    API_VERSION = 1
    RESPONSE_TYPE = SyncGroupResponse_v1
    SCHEMA = SyncGroupRequest_v0.SCHEMA


# Version registries indexed by api_version.
SyncGroupRequest = [SyncGroupRequest_v0, SyncGroupRequest_v1]
SyncGroupResponse = [SyncGroupResponse_v0, SyncGroupResponse_v1]


class MemberAssignment(Struct):
    """Partition-assignment blob carried in SyncGroup member_assignment."""
    SCHEMA = Schema(
        ('version', Int16),
        ('assignment', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(Int32)))),
        ('user_data', Bytes)
    )
class HeartbeatResponse_v0(Response):
    """Coordinator's heartbeat ack; error_code signals rebalance/fencing."""
    API_KEY = 12
    API_VERSION = 0
    SCHEMA = Schema(
        ('error_code', Int16)
    )


class HeartbeatResponse_v1(Response):
    """v1 prepends throttle_time_ms."""
    API_KEY = 12
    API_VERSION = 1
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('error_code', Int16)
    )


class HeartbeatRequest_v0(Request):
    """Periodic liveness ping from a group member to the coordinator."""
    API_KEY = 12
    API_VERSION = 0
    RESPONSE_TYPE = HeartbeatResponse_v0
    SCHEMA = Schema(
        ('group', String('utf-8')),
        ('generation_id', Int32),
        ('member_id', String('utf-8'))
    )


class HeartbeatRequest_v1(Request):
    """Wire-identical to v0; response gains throttle_time_ms."""
    API_KEY = 12
    API_VERSION = 1
    RESPONSE_TYPE = HeartbeatResponse_v1
    SCHEMA = HeartbeatRequest_v0.SCHEMA


# Version registries indexed by api_version.
HeartbeatRequest = [HeartbeatRequest_v0, HeartbeatRequest_v1]
HeartbeatResponse = [HeartbeatResponse_v0, HeartbeatResponse_v1]


class LeaveGroupResponse_v0(Response):
    """Coordinator's ack for a voluntary group departure."""
    API_KEY = 13
    API_VERSION = 0
    SCHEMA = Schema(
        ('error_code', Int16)
    )


class LeaveGroupResponse_v1(Response):
    """v1 prepends throttle_time_ms."""
    API_KEY = 13
    API_VERSION = 1
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('error_code', Int16)
    )


class LeaveGroupRequest_v0(Request):
    """Notify the coordinator that this member is leaving the group."""
    API_KEY = 13
    API_VERSION = 0
    RESPONSE_TYPE = LeaveGroupResponse_v0
    SCHEMA = Schema(
        ('group', String('utf-8')),
        ('member_id', String('utf-8'))
    )


class LeaveGroupRequest_v1(Request):
    """Wire-identical to v0; response gains throttle_time_ms."""
    API_KEY = 13
    API_VERSION = 1
    RESPONSE_TYPE = LeaveGroupResponse_v1
    SCHEMA = LeaveGroupRequest_v0.SCHEMA


# Version registries indexed by api_version.
LeaveGroupRequest = [LeaveGroupRequest_v0, LeaveGroupRequest_v1]
LeaveGroupResponse = [LeaveGroupResponse_v0, LeaveGroupResponse_v1]

View File

@@ -0,0 +1,216 @@
from __future__ import absolute_import
import io
import time
from kafka.codec import (has_gzip, has_snappy, has_lz4, has_zstd,
gzip_decode, snappy_decode, zstd_decode,
lz4_decode, lz4_decode_old_kafka)
from kafka.protocol.frame import KafkaBytes
from kafka.protocol.struct import Struct
from kafka.protocol.types import (
Int8, Int32, Int64, Bytes, Schema, AbstractType
)
from kafka.util import crc32, WeakMethod
class Message(Struct):
    """A Kafka v0/v1 message (the pre-record-batch wire format).

    Wraps a key/value pair plus crc, magic (format version), attributes
    (codec + timestamp-type bits) and, for magic v1, a timestamp.
    """
    # SCHEMAS is indexed by magic: [0] has no timestamp field, [1] does.
    SCHEMAS = [
        Schema(
            ('crc', Int32),
            ('magic', Int8),
            ('attributes', Int8),
            ('key', Bytes),
            ('value', Bytes)),
        Schema(
            ('crc', Int32),
            ('magic', Int8),
            ('attributes', Int8),
            ('timestamp', Int64),
            ('key', Bytes),
            ('value', Bytes)),
    ]
    SCHEMA = SCHEMAS[1]
    # Low 3 attribute bits select the compression codec.
    CODEC_MASK = 0x07
    CODEC_GZIP = 0x01
    CODEC_SNAPPY = 0x02
    CODEC_LZ4 = 0x03
    CODEC_ZSTD = 0x04
    TIMESTAMP_TYPE_MASK = 0x08
    HEADER_SIZE = 22 # crc(4), magic(1), attributes(1), timestamp(8), key+value size(4*2)

    def __init__(self, value, key=None, magic=0, attributes=0, crc=0,
                 timestamp=None):
        assert value is None or isinstance(value, bytes), 'value must be bytes'
        assert key is None or isinstance(key, bytes), 'key must be bytes'
        assert magic > 0 or timestamp is None, 'timestamp not supported in v0'
        # Default timestamp to now for v1 messages
        if magic > 0 and timestamp is None:
            timestamp = int(time.time() * 1000)
        self.timestamp = timestamp
        self.crc = crc
        self._validated_crc = None  # cached crc computed over the raw bytes
        self.magic = magic
        self.attributes = attributes
        self.key = key
        self.value = value
        # NOTE(review): bound via WeakMethod (kafka.util), presumably to avoid
        # a reference cycle through the bound method -- confirm before changing
        self.encode = WeakMethod(self._encode_self)

    @property
    def timestamp_type(self):
        """0 for CreateTime; 1 for LogAppendTime; None if unsupported.

        Value is determined by broker; produced messages should always set to 0
        Requires Kafka >= 0.10 / message version >= 1
        """
        if self.magic == 0:
            return None
        elif self.attributes & self.TIMESTAMP_TYPE_MASK:
            return 1
        else:
            return 0

    def _encode_self(self, recalc_crc=True):
        """Encode this message per its magic version, recomputing crc by default."""
        version = self.magic
        if version == 1:
            fields = (self.crc, self.magic, self.attributes, self.timestamp, self.key, self.value)
        elif version == 0:
            fields = (self.crc, self.magic, self.attributes, self.key, self.value)
        else:
            raise ValueError('Unrecognized message version: %s' % (version,))
        message = Message.SCHEMAS[version].encode(fields)
        if not recalc_crc:
            return message
        # crc covers everything after the 4-byte crc field itself
        self.crc = crc32(message[4:])
        crc_field = self.SCHEMAS[version].fields[0]
        return crc_field.encode(self.crc) + message[4:]

    @classmethod
    def decode(cls, data):
        """Decode a Message from bytes or a file-like object."""
        _validated_crc = None
        if isinstance(data, bytes):
            _validated_crc = crc32(data[4:])
            data = io.BytesIO(data)
        # Partial decode required to determine message version
        base_fields = cls.SCHEMAS[0].fields[0:3]
        crc, magic, attributes = [field.decode(data) for field in base_fields]
        remaining = cls.SCHEMAS[magic].fields[3:]
        fields = [field.decode(data) for field in remaining]
        if magic == 1:
            timestamp = fields[0]
        else:
            timestamp = None
        msg = cls(fields[-1], key=fields[-2],
                  magic=magic, attributes=attributes, crc=crc,
                  timestamp=timestamp)
        msg._validated_crc = _validated_crc
        return msg

    def validate_crc(self):
        """Return True if the stored crc matches the crc of the encoded payload."""
        if self._validated_crc is None:
            raw_msg = self._encode_self(recalc_crc=False)
            self._validated_crc = crc32(raw_msg[4:])
        if self.crc == self._validated_crc:
            return True
        return False

    def is_compressed(self):
        """True if the attribute codec bits indicate a compressed value."""
        return self.attributes & self.CODEC_MASK != 0

    def decompress(self):
        """Decompress the value and decode it as an embedded MessageSet."""
        codec = self.attributes & self.CODEC_MASK
        assert codec in (self.CODEC_GZIP, self.CODEC_SNAPPY, self.CODEC_LZ4, self.CODEC_ZSTD)
        if codec == self.CODEC_GZIP:
            assert has_gzip(), 'Gzip decompression unsupported'
            raw_bytes = gzip_decode(self.value)
        elif codec == self.CODEC_SNAPPY:
            assert has_snappy(), 'Snappy decompression unsupported'
            raw_bytes = snappy_decode(self.value)
        elif codec == self.CODEC_LZ4:
            assert has_lz4(), 'LZ4 decompression unsupported'
            if self.magic == 0:
                # magic v0 used the legacy ("old kafka") lz4 framing
                raw_bytes = lz4_decode_old_kafka(self.value)
            else:
                raw_bytes = lz4_decode(self.value)
        elif codec == self.CODEC_ZSTD:
            assert has_zstd(), "ZSTD decompression unsupported"
            raw_bytes = zstd_decode(self.value)
        else:
            raise Exception('This should be impossible')
        return MessageSet.decode(raw_bytes, bytes_to_read=len(raw_bytes))

    def __hash__(self):
        return hash(self._encode_self(recalc_crc=False))
class PartialMessage(bytes):
    """Marker for a truncated trailing message in a fetched MessageSet."""

    def __repr__(self):
        return 'PartialMessage({0})'.format(self)
class MessageSet(AbstractType):
    """Codec for the pre-v2 MessageSet wire format: repeated (offset, message) pairs."""
    ITEM = Schema(
        ('offset', Int64),
        ('message', Bytes)
    )
    HEADER_SIZE = 12 # offset + message_size

    @classmethod
    def encode(cls, items, prepend_size=True):
        """Encode (offset, message) pairs, or pass through an already-encoded buffer."""
        # RecordAccumulator encodes messagesets internally
        if isinstance(items, (io.BytesIO, KafkaBytes)):
            size = Int32.decode(items)
            if prepend_size:
                # rewind and return all the bytes
                items.seek(items.tell() - 4)
                size += 4
            return items.read(size)
        encoded_values = []
        for (offset, message) in items:
            encoded_values.append(Int64.encode(offset))
            encoded_values.append(Bytes.encode(message))
        encoded = b''.join(encoded_values)
        if prepend_size:
            return Bytes.encode(encoded)
        else:
            return encoded

    @classmethod
    def decode(cls, data, bytes_to_read=None):
        """Compressed messages should pass in bytes_to_read (via message size)
        otherwise, we decode from data as Int32

        Returns a list of (offset, message_size, Message) tuples; a truncated
        final message is returned as (None, None, PartialMessage()).
        """
        if isinstance(data, bytes):
            data = io.BytesIO(data)
        if bytes_to_read is None:
            bytes_to_read = Int32.decode(data)
        # if FetchRequest max_bytes is smaller than the available message set
        # the server returns partial data for the final message
        # So create an internal buffer to avoid over-reading
        raw = io.BytesIO(data.read(bytes_to_read))
        items = []
        while bytes_to_read:
            try:
                offset = Int64.decode(raw)
                msg_bytes = Bytes.decode(raw)
                # 8-byte offset + 4-byte size prefix + message body
                bytes_to_read -= 8 + 4 + len(msg_bytes)
                items.append((offset, len(msg_bytes), Message.decode(msg_bytes)))
            except ValueError:
                # PartialMessage to signal that max_bytes may be too small
                items.append((None, None, PartialMessage()))
                break
        return items

    @classmethod
    def repr(cls, messages):
        """Human-readable form; decodes buffers without disturbing their position."""
        if isinstance(messages, (KafkaBytes, io.BytesIO)):
            offset = messages.tell()
            decoded = cls.decode(messages)
            messages.seek(offset)
            messages = decoded
        return str([cls.ITEM.repr(m) for m in messages])

View File

@@ -0,0 +1,200 @@
from __future__ import absolute_import
from kafka.protocol.api import Request, Response
from kafka.protocol.types import Array, Boolean, Int16, Int32, Schema, String
class MetadataResponse_v0(Response):
    """Cluster metadata: broker list plus per-topic partition leadership/replicas."""
    API_KEY = 3
    API_VERSION = 0
    SCHEMA = Schema(
        ('brokers', Array(
            ('node_id', Int32),
            ('host', String('utf-8')),
            ('port', Int32))),
        ('topics', Array(
            ('error_code', Int16),
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('error_code', Int16),
                ('partition', Int32),
                ('leader', Int32),
                ('replicas', Array(Int32)),
                ('isr', Array(Int32))))))
    )


class MetadataResponse_v1(Response):
    """v1 adds broker rack, controller_id and per-topic is_internal."""
    API_KEY = 3
    API_VERSION = 1
    SCHEMA = Schema(
        ('brokers', Array(
            ('node_id', Int32),
            ('host', String('utf-8')),
            ('port', Int32),
            ('rack', String('utf-8')))),
        ('controller_id', Int32),
        ('topics', Array(
            ('error_code', Int16),
            ('topic', String('utf-8')),
            ('is_internal', Boolean),
            ('partitions', Array(
                ('error_code', Int16),
                ('partition', Int32),
                ('leader', Int32),
                ('replicas', Array(Int32)),
                ('isr', Array(Int32))))))
    )


class MetadataResponse_v2(Response):
    API_KEY = 3
    API_VERSION = 2
    SCHEMA = Schema(
        ('brokers', Array(
            ('node_id', Int32),
            ('host', String('utf-8')),
            ('port', Int32),
            ('rack', String('utf-8')))),
        ('cluster_id', String('utf-8')),  # <-- Added cluster_id field in v2
        ('controller_id', Int32),
        ('topics', Array(
            ('error_code', Int16),
            ('topic', String('utf-8')),
            ('is_internal', Boolean),
            ('partitions', Array(
                ('error_code', Int16),
                ('partition', Int32),
                ('leader', Int32),
                ('replicas', Array(Int32)),
                ('isr', Array(Int32))))))
    )


class MetadataResponse_v3(Response):
    """v3 prepends throttle_time_ms."""
    API_KEY = 3
    API_VERSION = 3
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('brokers', Array(
            ('node_id', Int32),
            ('host', String('utf-8')),
            ('port', Int32),
            ('rack', String('utf-8')))),
        ('cluster_id', String('utf-8')),
        ('controller_id', Int32),
        ('topics', Array(
            ('error_code', Int16),
            ('topic', String('utf-8')),
            ('is_internal', Boolean),
            ('partitions', Array(
                ('error_code', Int16),
                ('partition', Int32),
                ('leader', Int32),
                ('replicas', Array(Int32)),
                ('isr', Array(Int32))))))
    )


class MetadataResponse_v4(Response):
    """Wire-identical to v3; paired with allow_auto_topic_creation in the v4 request."""
    API_KEY = 3
    API_VERSION = 4
    SCHEMA = MetadataResponse_v3.SCHEMA


class MetadataResponse_v5(Response):
    """v5 adds per-partition offline_replicas."""
    API_KEY = 3
    API_VERSION = 5
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('brokers', Array(
            ('node_id', Int32),
            ('host', String('utf-8')),
            ('port', Int32),
            ('rack', String('utf-8')))),
        ('cluster_id', String('utf-8')),
        ('controller_id', Int32),
        ('topics', Array(
            ('error_code', Int16),
            ('topic', String('utf-8')),
            ('is_internal', Boolean),
            ('partitions', Array(
                ('error_code', Int16),
                ('partition', Int32),
                ('leader', Int32),
                ('replicas', Array(Int32)),
                ('isr', Array(Int32)),
                ('offline_replicas', Array(Int32))))))
    )
class MetadataRequest_v0(Request):
    """Request cluster metadata for the named topics (or all topics)."""
    API_KEY = 3
    API_VERSION = 0
    RESPONSE_TYPE = MetadataResponse_v0
    SCHEMA = Schema(
        ('topics', Array(String('utf-8')))
    )
    ALL_TOPICS = None # Empty Array (len 0) for topics returns all topics


class MetadataRequest_v1(Request):
    """v1 flips the sentinels: null array = all topics, empty array = none."""
    API_KEY = 3
    API_VERSION = 1
    RESPONSE_TYPE = MetadataResponse_v1
    SCHEMA = MetadataRequest_v0.SCHEMA
    ALL_TOPICS = -1 # Null Array (len -1) for topics returns all topics
    NO_TOPICS = None # Empty array (len 0) for topics returns no topics


class MetadataRequest_v2(Request):
    """Wire-identical to v1; response gains cluster_id."""
    API_KEY = 3
    API_VERSION = 2
    RESPONSE_TYPE = MetadataResponse_v2
    SCHEMA = MetadataRequest_v1.SCHEMA
    ALL_TOPICS = -1 # Null Array (len -1) for topics returns all topics
    NO_TOPICS = None # Empty array (len 0) for topics returns no topics


class MetadataRequest_v3(Request):
    """Wire-identical to v1; response gains throttle_time_ms."""
    API_KEY = 3
    API_VERSION = 3
    RESPONSE_TYPE = MetadataResponse_v3
    SCHEMA = MetadataRequest_v1.SCHEMA
    ALL_TOPICS = -1 # Null Array (len -1) for topics returns all topics
    NO_TOPICS = None # Empty array (len 0) for topics returns no topics


class MetadataRequest_v4(Request):
    """v4 adds allow_auto_topic_creation."""
    API_KEY = 3
    API_VERSION = 4
    RESPONSE_TYPE = MetadataResponse_v4
    SCHEMA = Schema(
        ('topics', Array(String('utf-8'))),
        ('allow_auto_topic_creation', Boolean)
    )
    ALL_TOPICS = -1 # Null Array (len -1) for topics returns all topics
    NO_TOPICS = None # Empty array (len 0) for topics returns no topics


class MetadataRequest_v5(Request):
    """
    The v5 metadata request is the same as v4.
    An additional field for offline_replicas has been added to the v5 metadata response
    """
    API_KEY = 3
    API_VERSION = 5
    RESPONSE_TYPE = MetadataResponse_v5
    SCHEMA = MetadataRequest_v4.SCHEMA
    ALL_TOPICS = -1 # Null Array (len -1) for topics returns all topics
    NO_TOPICS = None # Empty array (len 0) for topics returns no topics


# Version registries indexed by api_version.
MetadataRequest = [
    MetadataRequest_v0, MetadataRequest_v1, MetadataRequest_v2,
    MetadataRequest_v3, MetadataRequest_v4, MetadataRequest_v5
]

MetadataResponse = [
    MetadataResponse_v0, MetadataResponse_v1, MetadataResponse_v2,
    MetadataResponse_v3, MetadataResponse_v4, MetadataResponse_v5
]

View File

@@ -0,0 +1,194 @@
from __future__ import absolute_import
from kafka.protocol.api import Request, Response
from kafka.protocol.types import Array, Int8, Int16, Int32, Int64, Schema, String
UNKNOWN_OFFSET = -1  # sentinel offset value meaning "unknown"


class OffsetResetStrategy(object):
    """Special timestamp values for ListOffsets: LATEST/-1, EARLIEST/-2, NONE/0."""
    LATEST = -1
    EARLIEST = -2
    NONE = 0
class OffsetResponse_v0(Response):
    """ListOffsets reply: per-partition list of candidate offsets."""
    API_KEY = 2
    API_VERSION = 0
    SCHEMA = Schema(
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('error_code', Int16),
                ('offsets', Array(Int64))))))
    )


class OffsetResponse_v1(Response):
    """v1 returns a single (timestamp, offset) pair instead of an offsets list."""
    API_KEY = 2
    API_VERSION = 1
    SCHEMA = Schema(
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('error_code', Int16),
                ('timestamp', Int64),
                ('offset', Int64)))))
    )


class OffsetResponse_v2(Response):
    """v2 prepends throttle_time_ms."""
    API_KEY = 2
    API_VERSION = 2
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('error_code', Int16),
                ('timestamp', Int64),
                ('offset', Int64)))))
    )


class OffsetResponse_v3(Response):
    """
    on quota violation, brokers send out responses before throttling
    """
    API_KEY = 2
    API_VERSION = 3
    SCHEMA = OffsetResponse_v2.SCHEMA


class OffsetResponse_v4(Response):
    """
    Add leader_epoch to response
    """
    API_KEY = 2
    API_VERSION = 4
    SCHEMA = Schema(
        ('throttle_time_ms', Int32),
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('error_code', Int16),
                ('timestamp', Int64),
                ('offset', Int64),
                ('leader_epoch', Int32)))))
    )


class OffsetResponse_v5(Response):
    """
    adds a new error code, OFFSET_NOT_AVAILABLE
    """
    API_KEY = 2
    API_VERSION = 5
    SCHEMA = OffsetResponse_v4.SCHEMA
class OffsetRequest_v0(Request):
    """ListOffsets: look up offsets by timestamp (or LATEST/EARLIEST sentinels)."""
    API_KEY = 2
    API_VERSION = 0
    RESPONSE_TYPE = OffsetResponse_v0
    SCHEMA = Schema(
        ('replica_id', Int32),
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('timestamp', Int64),
                ('max_offsets', Int32)))))
    )
    DEFAULTS = {
        'replica_id': -1  # -1 identifies an ordinary consumer (not a broker replica)
    }


class OffsetRequest_v1(Request):
    """v1 drops max_offsets; response returns a single offset per partition."""
    API_KEY = 2
    API_VERSION = 1
    RESPONSE_TYPE = OffsetResponse_v1
    SCHEMA = Schema(
        ('replica_id', Int32),
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('timestamp', Int64)))))
    )
    DEFAULTS = {
        'replica_id': -1
    }


class OffsetRequest_v2(Request):
    API_KEY = 2
    API_VERSION = 2
    RESPONSE_TYPE = OffsetResponse_v2
    SCHEMA = Schema(
        ('replica_id', Int32),
        ('isolation_level', Int8),  # <- added isolation_level
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('timestamp', Int64)))))
    )
    DEFAULTS = {
        'replica_id': -1
    }


class OffsetRequest_v3(Request):
    """Wire-identical to v2; responses are sent before throttling."""
    API_KEY = 2
    API_VERSION = 3
    RESPONSE_TYPE = OffsetResponse_v3
    SCHEMA = OffsetRequest_v2.SCHEMA
    DEFAULTS = {
        'replica_id': -1
    }
class OffsetRequest_v4(Request):
    """
    Add current_leader_epoch to request (KIP-320).

    Fixed: CurrentLeaderEpoch is an INT32 in the ListOffsets v4 wire format
    (it was declared Int64 here, which corrupts the encoding of every
    partition entry; compare OffsetResponse_v4.leader_epoch and
    FetchRequest_v9.current_leader_epoch, both Int32).
    """
    API_KEY = 2
    API_VERSION = 4
    RESPONSE_TYPE = OffsetResponse_v4
    SCHEMA = Schema(
        ('replica_id', Int32),
        ('isolation_level', Int8),  # <- added isolation_level
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('current_leader_epoch', Int32),
                ('timestamp', Int64)))))
    )
    DEFAULTS = {
        'replica_id': -1
    }
class OffsetRequest_v5(Request):
    """Wire-identical to v4; response may return OFFSET_NOT_AVAILABLE."""
    API_KEY = 2
    API_VERSION = 5
    RESPONSE_TYPE = OffsetResponse_v5
    SCHEMA = OffsetRequest_v4.SCHEMA
    DEFAULTS = {
        'replica_id': -1
    }


# Version registries indexed by api_version.
OffsetRequest = [
    OffsetRequest_v0, OffsetRequest_v1, OffsetRequest_v2,
    OffsetRequest_v3, OffsetRequest_v4, OffsetRequest_v5,
]

OffsetResponse = [
    OffsetResponse_v0, OffsetResponse_v1, OffsetResponse_v2,
    OffsetResponse_v3, OffsetResponse_v4, OffsetResponse_v5,
]

View File

@@ -0,0 +1,183 @@
from __future__ import absolute_import
import collections
import logging
import kafka.errors as Errors
from kafka.protocol.api import RequestHeader
from kafka.protocol.commit import GroupCoordinatorResponse
from kafka.protocol.frame import KafkaBytes
from kafka.protocol.types import Int32
from kafka.version import __version__
log = logging.getLogger(__name__)
class KafkaProtocol(object):
    """Manage the kafka network protocol

    Use an instance of KafkaProtocol to manage bytes send/recv'd
    from a network socket to a broker.

    Arguments:
        client_id (str): identifier string to be included in each request
        api_version (tuple): Optional tuple to specify api_version to use.
            Currently only used to check for 0.8.2 protocol quirks, but
            may be used for more in the future.
    """
    def __init__(self, client_id=None, api_version=None):
        if client_id is None:
            client_id = self._gen_client_id()
        self._client_id = client_id
        self._api_version = api_version
        self._correlation_id = 0
        self._header = KafkaBytes(4)  # reusable buffer for the 4-byte size prefix
        self._rbuffer = None  # KafkaBytes payload buffer while a response is in flight
        self._receiving = False  # True while reading payload bytes (vs the size header)
        self.in_flight_requests = collections.deque()  # (correlation_id, request) pairs
        self.bytes_to_send = []

    def _next_correlation_id(self):
        """Return the next request correlation id (wraps at 2**31)."""
        self._correlation_id = (self._correlation_id + 1) % 2**31
        return self._correlation_id

    def _gen_client_id(self):
        """Default client_id string: 'kafka-python' plus the library version."""
        return 'kafka-python' + __version__

    def send_request(self, request, correlation_id=None):
        """Encode and queue a kafka api request for sending.

        Arguments:
            request (object): An un-encoded kafka request.
            correlation_id (int, optional): Optionally specify an ID to
                correlate requests with responses. If not provided, an ID will
                be generated automatically.

        Returns:
            correlation_id
        """
        log.debug('Sending request %s', request)
        if correlation_id is None:
            correlation_id = self._next_correlation_id()
        header = RequestHeader(request,
                               correlation_id=correlation_id,
                               client_id=self._client_id)
        message = b''.join([header.encode(), request.encode()])
        size = Int32.encode(len(message))
        data = size + message
        self.bytes_to_send.append(data)
        # Fire-and-forget requests (e.g. produce with acks=0) get no response
        if request.expect_response():
            ifr = (correlation_id, request)
            self.in_flight_requests.append(ifr)
        return correlation_id

    def send_bytes(self):
        """Retrieve all pending bytes to send on the network"""
        data = b''.join(self.bytes_to_send)
        self.bytes_to_send = []
        return data

    def receive_bytes(self, data):
        """Process bytes received from the network.

        Arguments:
            data (bytes): any length bytes received from a network connection
                to a kafka broker.

        Returns:
            responses (list of (correlation_id, response)): any/all completed
                responses, decoded from bytes to python objects.

        Raises:
            KafkaProtocolError: if the bytes received could not be decoded.
            CorrelationIdError: if the response does not match the request
                correlation id.
        """
        i = 0
        n = len(data)
        responses = []
        while i < n:
            # Not receiving is the state of reading the payload header
            if not self._receiving:
                bytes_to_read = min(4 - self._header.tell(), n - i)
                self._header.write(data[i:i+bytes_to_read])
                i += bytes_to_read
                if self._header.tell() == 4:
                    self._header.seek(0)
                    nbytes = Int32.decode(self._header)
                    # reset buffer and switch state to receiving payload bytes
                    self._rbuffer = KafkaBytes(nbytes)
                    self._receiving = True
                elif self._header.tell() > 4:
                    raise Errors.KafkaError('this should not happen - are you threading?')
            if self._receiving:
                total_bytes = len(self._rbuffer)
                staged_bytes = self._rbuffer.tell()
                bytes_to_read = min(total_bytes - staged_bytes, n - i)
                self._rbuffer.write(data[i:i+bytes_to_read])
                i += bytes_to_read
                staged_bytes = self._rbuffer.tell()
                if staged_bytes > total_bytes:
                    raise Errors.KafkaError('Receive buffer has more bytes than expected?')
                if staged_bytes != total_bytes:
                    # payload still incomplete; wait for more network data
                    break
                self._receiving = False
                self._rbuffer.seek(0)
                resp = self._process_response(self._rbuffer)
                responses.append(resp)
                self._reset_buffer()
        return responses

    def _process_response(self, read_buffer):
        """Match a complete payload buffer to its in-flight request and decode it."""
        recv_correlation_id = Int32.decode(read_buffer)
        log.debug('Received correlation id: %d', recv_correlation_id)
        if not self.in_flight_requests:
            raise Errors.CorrelationIdError(
                'No in-flight-request found for server response'
                ' with correlation ID %d'
                % (recv_correlation_id,))
        (correlation_id, request) = self.in_flight_requests.popleft()
        # 0.8.2 quirk
        if (recv_correlation_id == 0 and
            correlation_id != 0 and
            request.RESPONSE_TYPE is GroupCoordinatorResponse[0] and
            (self._api_version == (0, 8, 2) or self._api_version is None)):
            log.warning('Kafka 0.8.2 quirk -- GroupCoordinatorResponse'
                        ' Correlation ID does not match request. This'
                        ' should go away once at least one topic has been'
                        ' initialized on the broker.')
        elif correlation_id != recv_correlation_id:
            # return or raise?
            raise Errors.CorrelationIdError(
                'Correlation IDs do not match: sent %d, recv %d'
                % (correlation_id, recv_correlation_id))
        # decode response
        log.debug('Processing response %s', request.RESPONSE_TYPE.__name__)
        try:
            response = request.RESPONSE_TYPE.decode(read_buffer)
        except ValueError:
            read_buffer.seek(0)
            buf = read_buffer.read()
            log.error('Response %d [ResponseType: %s Request: %s]:'
                      ' Unable to decode %d-byte buffer: %r',
                      correlation_id, request.RESPONSE_TYPE,
                      request, len(buf), buf)
            raise Errors.KafkaProtocolError('Unable to decode response')
        return (correlation_id, response)

    def _reset_buffer(self):
        """Return to header-reading state, discarding the payload buffer."""
        self._receiving = False
        self._header.seek(0)
        self._rbuffer = None

View File

@@ -0,0 +1,35 @@
from __future__ import absolute_import
try:
import copyreg # pylint: disable=import-error
except ImportError:
import copy_reg as copyreg # pylint: disable=import-error
import types
def _pickle_method(method):
try:
func_name = method.__func__.__name__
obj = method.__self__
cls = method.__self__.__class__
except AttributeError:
func_name = method.im_func.__name__
obj = method.im_self
cls = method.im_class
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
# https://bytes.com/topic/python/answers/552476-why-cant-you-pickle-instancemethods
copyreg.pickle(types.MethodType, _pickle_method, _unpickle_method)

View File

@@ -0,0 +1,232 @@
from __future__ import absolute_import
from kafka.protocol.api import Request, Response
from kafka.protocol.types import Int16, Int32, Int64, String, Array, Schema, Bytes
class ProduceResponse_v0(Response):
API_KEY = 0
API_VERSION = 0
SCHEMA = Schema(
('topics', Array(
('topic', String('utf-8')),
('partitions', Array(
('partition', Int32),
('error_code', Int16),
('offset', Int64)))))
)
class ProduceResponse_v1(Response):
API_KEY = 0
API_VERSION = 1
SCHEMA = Schema(
('topics', Array(
('topic', String('utf-8')),
('partitions', Array(
('partition', Int32),
('error_code', Int16),
('offset', Int64))))),
('throttle_time_ms', Int32)
)
class ProduceResponse_v2(Response):
    """Adds a per-partition timestamp field to the v1 response."""
    API_KEY = 0
    API_VERSION = 2
    SCHEMA = Schema(
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('error_code', Int16),
                ('offset', Int64),
                ('timestamp', Int64))))),
        ('throttle_time_ms', Int32)
    )
class ProduceResponse_v3(Response):
    """Wire schema unchanged from v2; version bump mirrors ProduceRequest_v3."""
    API_KEY = 0
    API_VERSION = 3
    SCHEMA = ProduceResponse_v2.SCHEMA
class ProduceResponse_v4(Response):
    """
    The version number is bumped up to indicate that the client supports KafkaStorageException.
    The KafkaStorageException will be translated to NotLeaderForPartitionException in the response if version <= 3

    Wire schema is unchanged from v3.
    """
    API_KEY = 0
    API_VERSION = 4
    SCHEMA = ProduceResponse_v3.SCHEMA
class ProduceResponse_v5(Response):
    """Adds a per-partition log_start_offset field to the v4 response."""
    API_KEY = 0
    API_VERSION = 5
    SCHEMA = Schema(
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('error_code', Int16),
                ('offset', Int64),
                ('timestamp', Int64),
                ('log_start_offset', Int64))))),
        ('throttle_time_ms', Int32)
    )
class ProduceResponse_v6(Response):
    """
    The version number is bumped to indicate that on quota violation brokers send out responses before throttling.

    Wire schema is unchanged from v5.
    """
    API_KEY = 0
    API_VERSION = 6
    SCHEMA = ProduceResponse_v5.SCHEMA
class ProduceResponse_v7(Response):
    """
    V7 bumped up to indicate ZStandard capability. (see KIP-110)

    Wire schema is unchanged from v6.
    """
    API_KEY = 0
    API_VERSION = 7
    SCHEMA = ProduceResponse_v6.SCHEMA
class ProduceResponse_v8(Response):
    """
    V8 bumped up to add two new fields record_errors offset list and error_message
    (See KIP-467)

    BUGFIX: the previous schema closed the partitions Array right after
    log_start_offset, leaving record_errors and error_message as stray extra
    elements of the ('partitions', ...) tuple. Array(*fields) builds a
    Schema via zip(*fields), which silently truncated to two fields and
    dropped record_errors/error_message entirely, so v8 responses could not
    be decoded correctly. They now live inside the partitions Array where
    the Kafka protocol defines them.
    """
    API_KEY = 0
    API_VERSION = 8
    SCHEMA = Schema(
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('error_code', Int16),
                ('offset', Int64),
                ('timestamp', Int64),
                ('log_start_offset', Int64),
                # Per-record errors for a failed batch (KIP-467)
                ('record_errors', Array(
                    ('batch_index', Int32),
                    ('batch_index_error_message', String('utf-8')))),
                ('error_message', String('utf-8')))))),
        ('throttle_time_ms', Int32)
    )
class ProduceRequest(Request):
    """Base class shared by every ProduceRequest version."""
    API_KEY = 0

    def expect_response(self):
        """Return False for acks=0 (fire-and-forget): the broker sends no response."""
        return self.required_acks != 0  # pylint: disable=no-member
class ProduceRequest_v0(ProduceRequest):
    """Initial ProduceRequest: acks, timeout, and per-partition message sets."""
    API_VERSION = 0
    RESPONSE_TYPE = ProduceResponse_v0
    SCHEMA = Schema(
        ('required_acks', Int16),
        ('timeout', Int32),
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('messages', Bytes)))))
    )
class ProduceRequest_v1(ProduceRequest):
    """Request schema unchanged from v0; the v1 response adds throttle_time_ms."""
    API_VERSION = 1
    RESPONSE_TYPE = ProduceResponse_v1
    SCHEMA = ProduceRequest_v0.SCHEMA
class ProduceRequest_v2(ProduceRequest):
    """Request schema unchanged from v1; the v2 response adds a timestamp field."""
    API_VERSION = 2
    RESPONSE_TYPE = ProduceResponse_v2
    SCHEMA = ProduceRequest_v1.SCHEMA
class ProduceRequest_v3(ProduceRequest):
    """Adds a nullable transactional_id field ahead of required_acks."""
    API_VERSION = 3
    RESPONSE_TYPE = ProduceResponse_v3
    SCHEMA = Schema(
        ('transactional_id', String('utf-8')),
        ('required_acks', Int16),
        ('timeout', Int32),
        ('topics', Array(
            ('topic', String('utf-8')),
            ('partitions', Array(
                ('partition', Int32),
                ('messages', Bytes)))))
    )
class ProduceRequest_v4(ProduceRequest):
    """
    The version number is bumped up to indicate that the client supports KafkaStorageException.
    The KafkaStorageException will be translated to NotLeaderForPartitionException in the response if version <= 3

    Wire schema is unchanged from v3.
    """
    API_VERSION = 4
    RESPONSE_TYPE = ProduceResponse_v4
    SCHEMA = ProduceRequest_v3.SCHEMA
class ProduceRequest_v5(ProduceRequest):
    """
    Same as v4. The version number is bumped since the v5 response includes an additional
    partition level field: the log_start_offset.
    """
    API_VERSION = 5
    RESPONSE_TYPE = ProduceResponse_v5
    SCHEMA = ProduceRequest_v4.SCHEMA
class ProduceRequest_v6(ProduceRequest):
    """
    The version number is bumped to indicate that on quota violation brokers send out responses before throttling.

    Wire schema is unchanged from v5.
    """
    API_VERSION = 6
    RESPONSE_TYPE = ProduceResponse_v6
    SCHEMA = ProduceRequest_v5.SCHEMA
class ProduceRequest_v7(ProduceRequest):
    """
    V7 bumped up to indicate ZStandard capability. (see KIP-110)

    Wire schema is unchanged from v6.
    """
    API_VERSION = 7
    RESPONSE_TYPE = ProduceResponse_v7
    SCHEMA = ProduceRequest_v6.SCHEMA
class ProduceRequest_v8(ProduceRequest):
    """
    V8 bumped up to add two new fields record_errors offset list and error_message to PartitionResponse
    (See KIP-467)

    Request wire schema is unchanged from v7; only the response gained fields.
    """
    API_VERSION = 8
    RESPONSE_TYPE = ProduceResponse_v8
    SCHEMA = ProduceRequest_v7.SCHEMA
# Rebind the names to version-ordered lists: index == API version.
ProduceRequest = [
    ProduceRequest_v0, ProduceRequest_v1, ProduceRequest_v2,
    ProduceRequest_v3, ProduceRequest_v4, ProduceRequest_v5,
    ProduceRequest_v6, ProduceRequest_v7, ProduceRequest_v8,
]
ProduceResponse = [
    ProduceResponse_v0, ProduceResponse_v1, ProduceResponse_v2,
    ProduceResponse_v3, ProduceResponse_v4, ProduceResponse_v5,
    ProduceResponse_v6, ProduceResponse_v7, ProduceResponse_v8,
]

View File

@@ -0,0 +1,72 @@
from __future__ import absolute_import
from io import BytesIO
from kafka.protocol.abstract import AbstractType
from kafka.protocol.types import Schema
from kafka.util import WeakMethod
class Struct(AbstractType):
    """Base for protocol messages: binds SCHEMA fields to instance attributes.

    Subclasses define SCHEMA; values can then be serialized to / parsed from
    the Kafka wire format via encode()/decode().
    """
    SCHEMA = Schema()

    def __init__(self, *args, **kwargs):
        # Positional args must mirror the schema exactly; otherwise fall back
        # to keyword args, defaulting any missing field to None.
        if len(args) == len(self.SCHEMA.fields):
            for i, name in enumerate(self.SCHEMA.names):
                self.__dict__[name] = args[i]
        elif len(args) > 0:
            raise ValueError('Args must be empty or mirror schema')
        else:
            for name in self.SCHEMA.names:
                self.__dict__[name] = kwargs.pop(name, None)
            if kwargs:
                raise ValueError('Keyword(s) not in schema %s: %s'
                                 % (list(self.SCHEMA.names),
                                    ', '.join(kwargs.keys())))

        # overloading encode() to support both class and instance
        # Without WeakMethod() this creates circular ref, which
        # causes instances to "leak" to garbage
        self.encode = WeakMethod(self._encode_self)

    @classmethod
    def encode(cls, item):  # pylint: disable=E0202
        """Encode `item`, a sequence parallel to SCHEMA.fields, to bytes.

        Shadowed on each instance by _encode_self (see __init__).
        """
        bits = []
        for i, field in enumerate(cls.SCHEMA.fields):
            bits.append(field.encode(item[i]))
        return b''.join(bits)

    def _encode_self(self):
        # Instance-level encode: serialize this struct's own attribute values.
        return self.SCHEMA.encode(
            [self.__dict__[name] for name in self.SCHEMA.names]
        )

    @classmethod
    def decode(cls, data):
        """Decode bytes or a file-like object into an instance of cls."""
        if isinstance(data, bytes):
            data = BytesIO(data)
        return cls(*[field.decode(data) for field in cls.SCHEMA.fields])

    def get_item(self, name):
        """Return the value of schema field `name`; raise KeyError if unknown."""
        if name not in self.SCHEMA.names:
            raise KeyError("%s is not in the schema" % name)
        return self.__dict__[name]

    def __repr__(self):
        key_vals = []
        for name, field in zip(self.SCHEMA.names, self.SCHEMA.fields):
            key_vals.append('%s=%s' % (name, field.repr(self.__dict__[name])))
        return self.__class__.__name__ + '(' + ', '.join(key_vals) + ')'

    def __hash__(self):
        # Hash of the encoded wire bytes; consistent with __eq__ below.
        return hash(self.encode())

    def __eq__(self, other):
        # NOTE(review): assumes `other` exposes SCHEMA and the same attributes;
        # comparing against an unrelated object raises AttributeError rather
        # than returning NotImplemented.
        if self.SCHEMA != other.SCHEMA:
            return False
        for attr in self.SCHEMA.names:
            if self.__dict__[attr] != other.__dict__[attr]:
                return False
        return True

View File

@@ -0,0 +1,198 @@
from __future__ import absolute_import
import struct
from struct import error
from kafka.protocol.abstract import AbstractType
def _pack(f, value):
try:
return f(value)
except error as e:
raise ValueError("Error encountered when attempting to convert value: "
"{!r} to struct format: '{}', hit error: {}"
.format(value, f, e))
def _unpack(f, data):
try:
(value,) = f(data)
return value
except error as e:
raise ValueError("Error encountered when attempting to convert value: "
"{!r} to struct format: '{}', hit error: {}"
.format(data, f, e))
class Int8(AbstractType):
    """Signed 8-bit big-endian integer."""
    _pack = struct.Struct('>b').pack
    _unpack = struct.Struct('>b').unpack

    @classmethod
    def encode(cls, value):
        return _pack(cls._pack, value)

    @classmethod
    def decode(cls, data):
        # Reads exactly 1 byte from the file-like `data`.
        return _unpack(cls._unpack, data.read(1))
class Int16(AbstractType):
    """Signed 16-bit big-endian integer."""
    _pack = struct.Struct('>h').pack
    _unpack = struct.Struct('>h').unpack

    @classmethod
    def encode(cls, value):
        return _pack(cls._pack, value)

    @classmethod
    def decode(cls, data):
        # Reads exactly 2 bytes from the file-like `data`.
        return _unpack(cls._unpack, data.read(2))
class Int32(AbstractType):
    """Signed 32-bit big-endian integer."""
    _pack = struct.Struct('>i').pack
    _unpack = struct.Struct('>i').unpack

    @classmethod
    def encode(cls, value):
        return _pack(cls._pack, value)

    @classmethod
    def decode(cls, data):
        # Reads exactly 4 bytes from the file-like `data`.
        return _unpack(cls._unpack, data.read(4))
class Int64(AbstractType):
    """Signed 64-bit big-endian integer."""
    _pack = struct.Struct('>q').pack
    _unpack = struct.Struct('>q').unpack

    @classmethod
    def encode(cls, value):
        return _pack(cls._pack, value)

    @classmethod
    def decode(cls, data):
        # Reads exactly 8 bytes from the file-like `data`.
        return _unpack(cls._unpack, data.read(8))
class String(AbstractType):
    """Int16-length-prefixed string; a length of -1 encodes a null string."""

    def __init__(self, encoding='utf-8'):
        self.encoding = encoding

    def encode(self, value):
        if value is None:
            # Null string: length -1 and no payload
            return Int16.encode(-1)
        encoded = str(value).encode(self.encoding)
        return Int16.encode(len(encoded)) + encoded

    def decode(self, data):
        size = Int16.decode(data)
        if size < 0:
            return None
        raw = data.read(size)
        if len(raw) != size:
            raise ValueError('Buffer underrun decoding string')
        return raw.decode(self.encoding)
class Bytes(AbstractType):
    """Int32-length-prefixed byte string; a length of -1 encodes null."""

    @classmethod
    def encode(cls, value):
        if value is None:
            # Null bytes: length -1 and no payload
            return Int32.encode(-1)
        return Int32.encode(len(value)) + value

    @classmethod
    def decode(cls, data):
        size = Int32.decode(data)
        if size < 0:
            return None
        raw = data.read(size)
        if len(raw) != size:
            raise ValueError('Buffer underrun decoding Bytes')
        return raw

    @classmethod
    def repr(cls, value):
        # Truncate long payloads so reprs/logs stay readable
        if value is not None and len(value) > 100:
            return repr(value[:100] + b'...')
        return repr(value)
class Boolean(AbstractType):
    """Single-byte boolean ('>?' format: 0 is False, non-zero is True)."""
    _pack = struct.Struct('>?').pack
    _unpack = struct.Struct('>?').unpack

    @classmethod
    def encode(cls, value):
        return _pack(cls._pack, value)

    @classmethod
    def decode(cls, data):
        # Reads exactly 1 byte from the file-like `data`.
        return _unpack(cls._unpack, data.read(1))
class Schema(AbstractType):
    """Ordered collection of named fields defining a wire-format layout.

    Constructed from (name, type) pairs; field order is the wire order.
    """

    def __init__(self, *fields):
        if fields:
            self.names, self.fields = zip(*fields)
        else:
            self.names, self.fields = (), ()

    def encode(self, item):
        """Encode `item`, a sequence parallel to self.fields, to bytes."""
        if len(item) != len(self.fields):
            raise ValueError('Item field count does not match Schema')
        return b''.join([
            field.encode(item[i])
            for i, field in enumerate(self.fields)
        ])

    def decode(self, data):
        """Decode one value per field, in declaration order, from file-like `data`."""
        return tuple([field.decode(data) for field in self.fields])

    def __len__(self):
        return len(self.fields)

    def repr(self, value):
        key_vals = []
        try:
            for i in range(len(self)):
                try:
                    # `value` may be a Struct (attribute access) or a plain tuple
                    field_val = getattr(value, self.names[i])
                except AttributeError:
                    field_val = value[i]
                key_vals.append('%s=%s' % (self.names[i], self.fields[i].repr(field_val)))
            return '(' + ', '.join(key_vals) + ')'
        except Exception:
            # Best-effort rendering: repr must never raise
            return repr(value)
class Array(AbstractType):
    """Int32-count-prefixed array of a single element type; count -1 is null."""

    def __init__(self, *array_of):
        if len(array_of) > 1:
            # Multiple (name, type) pairs: element type is a composite Schema
            self.array_of = Schema(*array_of)
            return
        if len(array_of) == 1:
            element = array_of[0]
            # Element may be a type instance (e.g. String('utf-8')) or a
            # type class (e.g. Int32); issubclass raises TypeError for
            # non-class arguments, matching the original behavior.
            if isinstance(element, AbstractType) or issubclass(element, AbstractType):
                self.array_of = element
                return
        raise ValueError('Array instantiated with no array_of type')

    def encode(self, items):
        if items is None:
            # Null array: count -1 and no payload
            return Int32.encode(-1)
        encoded = [self.array_of.encode(item) for item in items]
        return Int32.encode(len(items)) + b''.join(encoded)

    def decode(self, data):
        count = Int32.decode(data)
        if count == -1:
            return None
        return [self.array_of.decode(data) for _ in range(count)]

    def repr(self, list_of_items):
        if list_of_items is None:
            return 'NULL'
        rendered = [self.array_of.repr(item) for item in list_of_items]
        return '[' + ', '.join(rendered) + ']'