| author | Taras Voinarovskyi <voyn1991@gmail.com> | 2017-10-22 16:56:28 +0900 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2017-10-22 16:56:28 +0900 |
| commit | a345dcd2ca1b0f8934864c512a4a78c65034dd36 (patch) | |
| tree | 0b7ea8c67b015f944b9a401f5e024a2eff7c7db9 | /kafka |
| parent | 4dbf34abce9b4addbb304520e2f692fbaef60ae5 (diff) | |
| download | kafka-python-a345dcd2ca1b0f8934864c512a4a78c65034dd36.tar.gz | |
Fix timestamp not passed to RecordMetadata (#1273)
* Fix timestamp not being passed to RecordMetadata properly
* Add more tests for LegacyBatch
* Fix producer test for recordmetadata
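
The producer-facing effect shows up in the `RecordMetadata` returned for each send. Below is a minimal sketch of where the fixed timestamp surfaces; the `localhost:9092` bootstrap server and `my-topic` topic are assumptions, and a broker of at least 0.10 is needed for the broker-side timestamp field to exist at all.

```python
from kafka import KafkaProducer

# Minimal sketch: the bootstrap server and topic below are assumptions.
producer = KafkaProducer(bootstrap_servers="localhost:9092")

# Send one record with an explicit client-side timestamp (milliseconds).
future = producer.send("my-topic", value=b"payload", timestamp_ms=1508655388000)
metadata = future.get(timeout=10)

# With this fix, metadata.timestamp keeps the produce timestamp for
# CREATE_TIME topics (where the broker answers -1) instead of being lost;
# for LOG_APPEND_TIME topics the broker-assigned timestamp still wins.
print(metadata.topic, metadata.partition, metadata.offset, metadata.timestamp)

producer.flush()
producer.close()
```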
Diffstat (limited to 'kafka')
| -rw-r--r-- | kafka/producer/future.py | 4 |
| -rw-r--r-- | kafka/producer/record_accumulator.py | 11 |
| -rw-r--r-- | kafka/record/legacy_records.py | 48 |
| -rw-r--r-- | kafka/record/memory_records.py | 9 |
4 files changed, 56 insertions, 16 deletions
```diff
diff --git a/kafka/producer/future.py b/kafka/producer/future.py
index bc50d0d..e39a0a9 100644
--- a/kafka/producer/future.py
+++ b/kafka/producer/future.py
@@ -44,7 +44,9 @@ class FutureRecordMetadata(Future):
         (relative_offset, timestamp_ms, checksum,
          serialized_key_size, serialized_value_size) = self.args
 
-        if produce_timestamp_ms is not None:
+        # None is when Broker does not support the API (<0.10) and
+        # -1 is when the broker is configured for CREATE_TIME timestamps
+        if produce_timestamp_ms is not None and produce_timestamp_ms != -1:
             timestamp_ms = produce_timestamp_ms
         if offset != -1 and relative_offset is not None:
             offset += relative_offset
diff --git a/kafka/producer/record_accumulator.py b/kafka/producer/record_accumulator.py
index 716ae65..5158474 100644
--- a/kafka/producer/record_accumulator.py
+++ b/kafka/producer/record_accumulator.py
@@ -56,15 +56,14 @@ class ProducerBatch(object):
         return self.records.next_offset()
 
     def try_append(self, timestamp_ms, key, value):
-        offset = self.records.next_offset()
-        checksum, record_size = self.records.append(timestamp_ms, key, value)
-        if record_size == 0:
+        metadata = self.records.append(timestamp_ms, key, value)
+        if metadata is None:
             return None
 
-        self.max_record_size = max(self.max_record_size, record_size)
+        self.max_record_size = max(self.max_record_size, metadata.size)
         self.last_append = time.time()
-        future = FutureRecordMetadata(self.produce_future, offset,
-                                      timestamp_ms, checksum,
+        future = FutureRecordMetadata(self.produce_future, metadata.offset,
+                                      metadata.timestamp, metadata.crc,
                                       len(key) if key is not None else -1,
                                       len(value) if value is not None else -1)
         return future
diff --git a/kafka/record/legacy_records.py b/kafka/record/legacy_records.py
index 98c8e30..055914c 100644
--- a/kafka/record/legacy_records.py
+++ b/kafka/record/legacy_records.py
@@ -110,6 +110,8 @@ class LegacyRecordBase(object):
     LOG_APPEND_TIME = 1
     CREATE_TIME = 0
 
+    NO_TIMESTAMP = -1
+
 
 class LegacyRecordBatch(ABCRecordBatch, LegacyRecordBase):
 
@@ -333,10 +335,14 @@ class LegacyRecordBatchBuilder(ABCRecordBatchBuilder, LegacyRecordBase):
         # Check types
         if type(offset) != int:
             raise TypeError(offset)
-        if timestamp is None:
+        if self._magic == 0:
+            timestamp = self.NO_TIMESTAMP
+        elif timestamp is None:
             timestamp = int(time.time() * 1000)
         elif type(timestamp) != int:
-            raise TypeError(timestamp)
+            raise TypeError(
+                "`timestamp` should be int, but {} provided".format(
+                    type(timestamp)))
         if not (key is None or
                 isinstance(key, (bytes, bytearray, memoryview))):
             raise TypeError(
@@ -351,7 +357,7 @@
         size = self.size_in_bytes(offset, timestamp, key, value)
         # We always allow at least one record to be appended
         if offset != 0 and pos + size >= self._batch_size:
-            return None, 0
+            return None
 
         # Allocate proper buffer length
         self._buffer.extend(bytearray(size))
@@ -359,7 +365,7 @@
         # Encode message
         crc = self._encode_msg(pos, offset, timestamp, key, value)
 
-        return crc, size
+        return LegacyRecordMetadata(offset, crc, size, timestamp)
 
     def _encode_msg(self, start_pos, offset, timestamp, key, value,
                     attributes=0):
@@ -484,3 +490,37 @@
             cls.record_size(magic, key, value)
         )
         return cls.LOG_OVERHEAD + cls.record_size(magic, key, value)
+
+
+class LegacyRecordMetadata(object):
+
+    __slots__ = ("_crc", "_size", "_timestamp", "_offset")
+
+    def __init__(self, offset, crc, size, timestamp):
+        self._offset = offset
+        self._crc = crc
+        self._size = size
+        self._timestamp = timestamp
+
+    @property
+    def offset(self):
+        return self._offset
+
+    @property
+    def crc(self):
+        return self._crc
+
+    @property
+    def size(self):
+        return self._size
+
+    @property
+    def timestamp(self):
+        return self._timestamp
+
+    def __repr__(self):
+        return (
+            "LegacyRecordMetadata(offset={!r}, crc={!r}, size={!r},"
+            " timestamp={!r})".format(
+                self._offset, self._crc, self._size, self._timestamp)
+        )
diff --git a/kafka/record/memory_records.py b/kafka/record/memory_records.py
index c6a28be..4ed992c 100644
--- a/kafka/record/memory_records.py
+++ b/kafka/record/memory_records.py
@@ -131,14 +131,13 @@ class MemoryRecordsBuilder(object):
             return None, 0
 
         offset = self._next_offset
-        checksum, actual_size = self._builder.append(
-            offset, timestamp, key, value)
+        metadata = self._builder.append(offset, timestamp, key, value)
         # Return of 0 size means there's no space to add a new message
-        if actual_size == 0:
-            return None, 0
+        if metadata is None:
+            return None
 
         self._next_offset += 1
-        return checksum, actual_size
+        return metadata
 
     def close(self):
         # This method may be called multiple times on the same batch
```
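
At the record layer, the shape of the change is that `LegacyRecordBatchBuilder.append()` now returns a `LegacyRecordMetadata` (or `None` when the batch has no room) instead of a `(crc, size)` tuple, and substitutes `NO_TIMESTAMP` (-1) for magic v0 messages. A small sketch of that return value, assuming the constructor arguments `MemoryRecordsBuilder` passes (`magic`, `compression_type`, `batch_size`) and an arbitrary batch size:

```python
from kafka.record.legacy_records import LegacyRecordBatchBuilder

# Sketch only: the batch size and timestamps are arbitrary assumptions.
builder = LegacyRecordBatchBuilder(magic=1, compression_type=0,
                                   batch_size=16384)

# append() now returns a LegacyRecordMetadata, or None if the batch is full.
meta = builder.append(0, timestamp=1508655388000, key=None, value=b"payload")
if meta is not None:
    print(meta.offset, meta.crc, meta.size, meta.timestamp)

# Magic v0 messages cannot carry a timestamp on the wire, so the builder
# records NO_TIMESTAMP (-1) no matter what the caller passed in.
v0_builder = LegacyRecordBatchBuilder(magic=0, compression_type=0,
                                      batch_size=16384)
v0_meta = v0_builder.append(0, timestamp=None, key=None, value=b"payload")
print(v0_meta.timestamp)  # -1
```

Returning one small immutable object is what lets `ProducerBatch.try_append()` hand the effective offset, crc, size, and timestamp straight to `FutureRecordMetadata` without re-deriving them.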
