From c988ef073d0092c8335bb4e4fd0950187143515f Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Sat, 1 Feb 2025 02:55:04 +0000 Subject: [PATCH 01/52] chore: remove black extension from devcontainer config --- .devcontainer/devcontainer.json | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 265da9c..60bff01 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -11,12 +11,7 @@ "postCreateCommand": "pipx install poetry", "customizations": { "vscode": { - "extensions": [ - "charliermarsh.ruff", - "ms-python.python", - "ms-python.isort", - "ms-python.black-formatter" - ] + "extensions": ["charliermarsh.ruff", "ms-python.python"] } } } From 723c8d8a80ebdfb1f3a03981647aa9aacaec4447 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Mon, 3 Feb 2025 04:11:30 +0000 Subject: [PATCH 02/52] build: add dev image target and compose service The dev service can be used for ad-hoc dev tasks in a specific Python version environment. 
e.g: docker compose run --rm dev-py39 --- docker-bake.hcl | 14 ++++++++++++++ docker-compose.yml | 13 +++++++++++++ 2 files changed, 27 insertions(+) diff --git a/docker-bake.hcl b/docker-bake.hcl index e2c4387..bb3941a 100644 --- a/docker-bake.hcl +++ b/docker-bake.hcl @@ -30,6 +30,20 @@ function "get_py_image_tag" { py_versions = ["3.9", "3.10", "3.11", "3.12", "3.13"] +target "dev" { + name = "dev_py${replace(py, ".", "")}" + matrix = { + py = py_versions, + } + args = { + PYTHON_VER = get_py_image_tag(py) + REPORT_CODE_COVERAGE = REPORT_CODE_COVERAGE + REPORT_CODE_BRANCH_COVERAGE = REPORT_CODE_BRANCH_COVERAGE + } + target = "poetry" + tags = ["ghcr.io/h4l/denokv-python/dev:py${replace(py, ".", "")}"] +} + target "test" { name = "test_py${replace(py, ".", "")}" matrix = { diff --git a/docker-compose.yml b/docker-compose.yml index 175ffa2..1f70ae7 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -11,6 +11,19 @@ services: networks: - devcontainer_denokv_python + dev-py39: + profiles: [dev-py39] + image: ghcr.io/h4l/denokv-python/dev:py39 + volumes: + - workspace:/workspaces + working_dir: /workspaces/denokv-python + environment: + PYTHONPATH: /workspaces/denokv-python/src + command: poetry run ipython + networks: + - devcontainer_denokv_python + + networks: devcontainer_denokv_python: external: true From ed55251ce8cf2fbc15a2949c774dc686c2fff7ce Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Mon, 3 Feb 2025 04:21:31 +0000 Subject: [PATCH 03/52] feat: support Generic TypedDict in _pycompat.typing Python versions before 3.11 don't allow TypedDict subclasses to also inherit from Generic at runtime. 
--- src/denokv/_pycompat/typing.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/src/denokv/_pycompat/typing.py b/src/denokv/_pycompat/typing.py index f809a42..3d81df6 100644 --- a/src/denokv/_pycompat/typing.py +++ b/src/denokv/_pycompat/typing.py @@ -10,15 +10,16 @@ from dataclasses import dataclass from dataclasses import field -from typing import IO as IO -from typing import TYPE_CHECKING as TYPE_CHECKING # Everything that exist in typing >=py39 except: # - ByteString (deprecated) # - overload (ruff does not recognise it when re-exported) # - Literal (ruff does not recognise it when re-exported) # - Handled below due to runtime differences: -# - TypeVar +# - TypeVar (does not support default argument pre py313) +# - TypedDict (does not support generics pre py311) +from typing import IO as IO +from typing import TYPE_CHECKING as TYPE_CHECKING from typing import AbstractSet as AbstractSet from typing import Annotated as Annotated from typing import Any as Any @@ -79,7 +80,6 @@ from typing import TextIO as TextIO from typing import Tuple as Tuple from typing import Type as Type -from typing import TypedDict as TypedDict from typing import Union as Union from typing import ValuesView as ValuesView from typing import cast as cast @@ -167,6 +167,18 @@ def override(method, /): return method +if TYPE_CHECKING: + from typing_extensions import TypedDict as TypedDict +else: + + class TypedDict(dict): + def __new__(cls, *args, **kwargs): + return dict(*args, **kwargs) + + @classmethod + def __init_subclass__(cls, total: bool = True) -> None: ... 
+ + def assert_never(value: Never, /) -> Never: """Assert to the type checker that a line of code is unreachable.""" raise AssertionError(f"Expected code to be unreachable, but got: {value!r}") From 61ca0f26da0a7c25e5ba2452f8ce5ca488ee5f0a Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Mon, 3 Feb 2025 04:59:02 +0000 Subject: [PATCH 04/52] feat: add explicit NotSet constant We'll use this instead of ellipsis to detect arguments that are not set in a call. This is needed to distinguish overloads in some cases. --- src/denokv/_pycompat/types.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 src/denokv/_pycompat/types.py diff --git a/src/denokv/_pycompat/types.py b/src/denokv/_pycompat/types.py new file mode 100644 index 0000000..355f359 --- /dev/null +++ b/src/denokv/_pycompat/types.py @@ -0,0 +1,18 @@ +from enum import Enum +from typing import Literal + +from denokv._pycompat.typing import TypeAlias + + +class NotSetEnum(Enum): + NotSet = "NotSet" + """ + Sentinel value to use as an argument default. + + It's purpose is to differentiate the argument not being set from an explicit + None value (or similar). + """ + + +NotSetType: TypeAlias = Literal[NotSetEnum.NotSet] +NotSet = NotSetEnum.NotSet From e5c7d8e91509addb0d43f8ccfe2b3e8dca303ec7 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Thu, 10 Oct 2024 04:44:41 +0000 Subject: [PATCH 05/52] test: implement atomic_write on MockKvDb We could get by without a functioning mock of the DB, but I find it valuable to implement it to make sure I'm not missing something about the possible functionality. Plus it'll make testing the Kv API easier. 
--- test/denokv_testing.py | 455 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 440 insertions(+), 15 deletions(-) diff --git a/test/denokv_testing.py b/test/denokv_testing.py index e257ca2..d4709b7 100644 --- a/test/denokv_testing.py +++ b/test/denokv_testing.py @@ -4,7 +4,9 @@ import sys from base64 import b16decode from base64 import b16encode +from collections import deque from dataclasses import dataclass +from dataclasses import field from datetime import datetime from datetime import timedelta from itertools import groupby @@ -12,24 +14,36 @@ from uuid import UUID import v8serialize +import v8serialize.encode +from fdb.tuple import pack from fdb.tuple import unpack +from denokv._datapath_pb2 import AtomicWrite +from denokv._datapath_pb2 import AtomicWriteOutput +from denokv._datapath_pb2 import AtomicWriteStatus +from denokv._datapath_pb2 import Enqueue from denokv._datapath_pb2 import KvEntry as ProtobufKvEntry from denokv._datapath_pb2 import KvValue +from denokv._datapath_pb2 import Mutation +from denokv._datapath_pb2 import MutationType from denokv._datapath_pb2 import ReadRange from denokv._datapath_pb2 import ReadRangeOutput from denokv._datapath_pb2 import ValueEncoding +from denokv._pycompat.dataclasses import slots_if310 +from denokv._pycompat.protobuf import enum_name from denokv._pycompat.typing import Any from denokv._pycompat.typing import ClassVar from denokv._pycompat.typing import Iterable from denokv._pycompat.typing import Mapping from denokv._pycompat.typing import NamedTuple from denokv._pycompat.typing import Sequence +from denokv._pycompat.typing import TypeIs from denokv._pycompat.typing import TypeVar from denokv.auth import DatabaseMetadata from denokv.auth import EndpointInfo from denokv.datapath import AnyKvKey from denokv.datapath import KvKeyTuple +from denokv.datapath import increment_packed_key from denokv.datapath import is_kv_key_tuple from denokv.datapath import pack_key from denokv.datapath import 
parse_protobuf_kv_entry @@ -37,6 +51,7 @@ from denokv.kv import AnyCursorFormat from denokv.kv import KvEntry from denokv.kv import KvU64 +from denokv.kv import LimitExceededPolicy from denokv.kv import ListContext from denokv.kv import VersionStamp from denokv.kv_keys import KvKey @@ -53,6 +68,26 @@ v8_decoder = v8serialize.Decoder() +def v8_encode_int_as_bigint( + value: object, + ctx: v8serialize.encode.EncodeContext, + next: v8serialize.encode.EncodeNextFn, +) -> None: + if isinstance(value, int): + ctx.stream.write_bigint(value) + else: + next(value) + + +# The default v8serialize encoder encodes int as number when it fits in the +# +/- 2**53 - 1 range which float64 can represent exactly. We want to encode int +# as bigint. +# TODO: add an explicit tagged JSBigInt type to v8serialize +v8_bigint_encoder = v8serialize.Encoder( + encode_steps=[v8_encode_int_as_bigint, *v8serialize.default_encode_steps] +) + + def assume_ok(result: Result[T, E]) -> T: if is_ok(result): return result.value @@ -88,27 +123,48 @@ def mk_db_meta(endpoints: Sequence[EndpointInfo]) -> DatabaseMetadata: ) +@dataclass(**slots_if310(), frozen=True) +class KvWriteValue: + data: bytes + encoding: ValueEncoding + expire_at_ms: int = field(default=0) + + @staticmethod + def tombstone() -> KvWriteValue: + return KvWriteValue(b"", ValueEncoding.VE_UNSPECIFIED, expire_at_ms=-1) + + class MockKvDbEntry(NamedTuple): key: bytes versionstamp: int encoding: ValueEncoding - value: bytes + data: bytes + expire_at_ms: int + + +class MockKvDbMessage(NamedTuple): + payload: object + deadline_ms: int + keys_if_undelivered: Sequence[KvKey] + backoff_schedule: Sequence[int] @dataclass class MockKvDb: entries: list[MockKvDbEntry] next_version: int + queued_messages: deque[MockKvDbMessage] - def __init__(self, entries: Iterable[tuple[bytes, KvValue]] = ()) -> None: + def __init__(self, entries: Iterable[tuple[bytes, KvWriteValue]] = ()) -> None: self.clear() self.extend(entries) def clear(self) -> None: 
self.entries = [] self.next_version = 0 + self.queued_messages = deque() - def extend(self, entries: Iterable[tuple[bytes, KvValue]]) -> None: + def extend(self, entries: Iterable[tuple[bytes, KvWriteValue]]) -> None: version = self.next_version self.next_version += 1 @@ -117,28 +173,39 @@ def extend(self, entries: Iterable[tuple[bytes, KvValue]]) -> None: key=key, versionstamp=version, encoding=kv_value.encoding, - value=kv_value.data, + data=kv_value.data, + expire_at_ms=kv_value.expire_at_ms, ) for (key, kv_value) in entries ) self.entries.sort(key=lambda e: (e.key, e.versionstamp)) def _read_range( - self, start: bytes, end: bytes, limit: int, reverse: bool + self, start: bytes, end: bytes, limit: int, reverse: bool, current_time_ms: int ) -> Sequence[MockKvDbEntry]: assert limit >= 0 matches = [e for e in self.entries if start <= e.key < end] latest_matches = [ - list(versions)[-1] - for (k, versions) in groupby(matches, key=lambda m: m.key) + ver + for ver in ( + list(versions)[-1] + for (k, versions) in groupby(matches, key=lambda m: m.key) + ) + if (ver.expire_at_ms == 0 or ver.expire_at_ms > current_time_ms) ] if reverse: latest_matches = list(reversed(latest_matches)) return latest_matches[:limit] - def snapshot_read_range(self, read: ReadRange) -> ReadRangeOutput: + def snapshot_read_range( + self, read: ReadRange, current_time_ms: int = 0 + ) -> ReadRangeOutput: entries = self._read_range( - start=read.start, end=read.end, limit=read.limit, reverse=read.reverse + start=read.start, + end=read.end, + limit=read.limit, + reverse=read.reverse, + current_time_ms=current_time_ms, ) return ReadRangeOutput( values=[ @@ -146,20 +213,378 @@ def snapshot_read_range(self, read: ReadRange) -> ReadRangeOutput: key=e.key, versionstamp=bytes(VersionStamp(e.versionstamp)), encoding=e.encoding, - value=e.value, + value=e.data, ) for e in entries ] ) + @overload + def _read_single( + self, key: bytes, current_time_ms: int, pending_entries: None = None + ) -> MockKvDbEntry 
| None: ... + + @overload + def _read_single( + self, + key: bytes, + current_time_ms: int, + pending_entries: Mapping[bytes, KvWriteValue], + ) -> MockKvDbEntry | KvWriteValue | None: ... + + def _read_single( + self, + key: bytes, + current_time_ms: int, + pending_entries: Mapping[bytes, KvWriteValue] | None = None, + ) -> MockKvDbEntry | KvWriteValue | None: + if pending_entries and (pending := pending_entries.get(key)): + if pending.expire_at_ms != 0 and pending.expire_at_ms <= current_time_ms: + return None + return pending + matches = self._read_range( + start=key, + end=increment_packed_key(key), + limit=1, + reverse=False, + current_time_ms=current_time_ms, + ) + assert len(matches) < 2 + return matches[0] if matches else None + + def atomic_write( + self, write: AtomicWrite, current_time_ms: int = 0 + ) -> AtomicWriteOutput: + failed_checks: list[int] = [] + for i, check in enumerate(write.checks): + checked_entry = self._read_single( + check.key, current_time_ms=current_time_ms + ) + + if len(check.versionstamp) == 0: + if checked_entry is not None: + failed_checks.append(i) + elif len(check.versionstamp) == 10: + if checked_entry is None: + failed_checks.append(i) + else: + if VersionStamp(checked_entry.versionstamp) != VersionStamp( + check.versionstamp + ): + failed_checks.append(i) + else: + raise ValueError( + f"Check versionstamp is not valid: {check.versionstamp!r}" + ) + + if len(failed_checks) > 0: + return AtomicWriteOutput( + status=AtomicWriteStatus.AW_CHECK_FAILURE, failed_checks=failed_checks + ) + + messages = [decode_enqueue_message(enqueue) for enqueue in write.enqueues] + + versionstamp = VersionStamp(self.next_version) + mutation_entries: dict[bytes, KvWriteValue] = {} + for mut in write.mutations: + cause: Exception | None = None + try: + key_tuple = unpack(mut.key) + key_bytes = pack(unpack(mut.key)) + except Exception as e: + key_bytes = None + cause = e + if key_bytes != mut.key: + raise ValueError(f"Mutation key is not valid: 
{mut.key!r}") from cause + + expires_at_ms = mut.expire_at_ms + if expires_at_ms < 0: + raise ValueError( + f"Mutation expire_at_ms cannot be negative: {mut.expire_at_ms}" + ) + + if mut.mutation_type == MutationType.M_SET: + mutation_entries[key_bytes] = KvWriteValue( + data=mut.value.data, + encoding=mut.value.encoding, + expire_at_ms=expires_at_ms, + ) + elif mut.mutation_type == MutationType.M_DELETE: + mutation_entries[key_bytes] = KvWriteValue.tombstone() + elif ( + mut.mutation_type == MutationType.M_SUM + or mut.mutation_type == MutationType.M_MIN + or mut.mutation_type == MutationType.M_MAX + ): + # Deno KV allows sum(left, right) with certain combinations of + # types: + # + # (Left is stored in the database, right is the value sent in + # the atomic operation.) + # + # Left | Right | + # ———— | KvU64 | bigint | number | + # KvU64 | yes | yes | no | + # bigint | no | yes | no | + # number | no | no | yes | + # + # min() and max() can only use KvU64 values. + + # We need to also read from mutation_entries to take into + # account values changed by preceding mutations within this + # AtomicWrite operation. + current = self._read_single( + mut.key, + current_time_ms=current_time_ms, + pending_entries=mutation_entries, + ) + operand_encoding, operand_value = decode_number_value(mut.value) + if current is None: + # float operands must have 0.0 not 0 as the default as + # cross-type sum operations are not allowed. 
+ current_encoding, current_value = None, (type(operand_value)(0)) + else: + current_encoding, current_value = decode_number_value(current) + + op = _get_number_operator(mut, current_encoding=current_encoding) + + if not _is_allowed_op_combination( + op, + (current_encoding, current_value), + (operand_encoding, operand_value), + ): + left_desc = "{} ({})".format( + None + if current_encoding is None + else enum_name(ValueEncoding, current_encoding), + type(current_value), + ) + right_desc = "{} ({})".format( + None + if operand_encoding is None + else enum_name(ValueEncoding, operand_encoding), + type(operand_value), + ) + raise ValueError( + f"Cannot apply operation " + f"{enum_name(MutationType, mut.mutation_type)}" + f"({left_desc}, {right_desc})" + ) + + try: + result = op(current_value, operand_value) + except Exception as e: + raise ValueError( + f"Mutation is not a valid " + f"{enum_name(MutationType, mut.mutation_type)} operation: {e}" + ) from e + result_encoding = current_encoding or operand_encoding + assert result_encoding is not None + + mutation_entries[key_bytes] = KvWriteValue( + data=encode_number_value(result, result_encoding), + encoding=result_encoding, + expire_at_ms=expires_at_ms, + ) + elif mut.mutation_type == MutationType.M_SET_SUFFIX_VERSIONSTAMPED_KEY: + suffix_key = pack((*key_tuple, str(versionstamp))) + mutation_entries[suffix_key] = KvWriteValue( + data=mut.value.data, + encoding=mut.value.encoding, + expire_at_ms=expires_at_ms, + ) + else: + raise ValueError( + f"Mutation mutation_type is not valid: {mut.mutation_type}" + ) + self.extend(mutation_entries.items()) + self.queued_messages.extend(messages) + + return AtomicWriteOutput( + status=AtomicWriteStatus.AW_SUCCESS, versionstamp=versionstamp + ) + + +def _is_allowed_op_combination( + op: Callable[[float, float], float] | MutationSumOperator | None, + left: tuple[ValueEncoding | None, float], + right: tuple[ValueEncoding | None, float], +) -> TypeIs[Callable[[float, float], float] 
| MutationSumOperator]: + left_encoding, left_value = left + right_encoding, right_value = right + if isinstance(op, MutationSumOperator): + if left_encoding == ValueEncoding.VE_LE64: + return right_encoding == ValueEncoding.VE_LE64 or ( + right_encoding == ValueEncoding.VE_V8 and isinstance(right_value, int) + ) + elif left_encoding == ValueEncoding.VE_V8: + return type(left_value) is type(right_value) + elif left_encoding is None: + # Sum can be used with a missing left operand. + return right_encoding == ValueEncoding.VE_LE64 or ( + right_encoding == ValueEncoding.VE_V8 + and isinstance(right_value, (int, float)) + ) + elif op is min or op is max: + return ( + left_encoding == ValueEncoding.VE_LE64 + or left_encoding is None + and right_encoding == ValueEncoding.VE_LE64 + ) + raise AssertionError(f"Unexpected op combinations: {op=}, {left=}, {right=}") + + +def _get_number_operator( + mut: Mutation, *, current_encoding: ValueEncoding | None +) -> Callable[[float, float], float] | None: + if mut.mutation_type == MutationType.M_SUM: + min_ = decode_v8_number(mut.sum_min) if mut.sum_min else None + max_ = decode_v8_number(mut.sum_max) if mut.sum_max else None + if ( + min_ is not None or max_ is not None or mut.sum_clamp + ) and mut.value.encoding != ValueEncoding.VE_V8: + raise ValueError("Mutation used sum_min/sum_min with non-V8 encoding") + if min_ is not None and max_ is not None and type(min_) is not type(max_): + raise ValueError( + "Mutation used different number types for sum_min and sum_max" + ) + + if ( + current_encoding == ValueEncoding.VE_LE64 + or mut.value.encoding == ValueEncoding.VE_LE64 + ): + if mut.sum_min or mut.sum_max or mut.sum_clamp: + raise ValueError("Mutation used custom sum limit with LE64 value") + return MutationSumOperator(0, 2**64 - 1, LimitExceededPolicy.WRAP) -def encode_protobuf_kv_value(value: object) -> KvValue: + boundary = ( + LimitExceededPolicy.CLAMP if mut.sum_clamp else LimitExceededPolicy.ERROR + ) + return 
MutationSumOperator(min=min_, max=max_, boundary=boundary) + elif mut.mutation_type == MutationType.M_MAX: + return max + elif mut.mutation_type == MutationType.M_MIN: + return min + return None + + +@dataclass +class MutationSumOperator: + min: int | float | None + max: int | float | None + boundary: LimitExceededPolicy + + def __call__(self, left: int | float, right: int | float) -> int | float: + min, max = self.min, self.max + if type(left) is not type(right): + raise TypeError(f"left and right must be the same type: {left=}, {right=}") + if (min is not None and type(min) is not type(left)) or ( + max is not None and type(max) is not type(left) + ): + raise TypeError( + "sum min/max value is a different number type than the operand values" + ) + if type(left) is not type(right): + raise TypeError(f"left and right must be the same type: {left=}, {right=}") + result = left + right + if self.boundary is LimitExceededPolicy.WRAP: + # wrap is only used for uint64 + assert min == 0 + assert max is not None and max >= 0 + result = result % (max + 1) + elif min is not None and result < min: + if self.boundary is LimitExceededPolicy.CLAMP: + result = min + else: + assert self.boundary is LimitExceededPolicy.ERROR + raise ValueError( + f"result of sum({left}, {right}) = {result}, which is less " + f"than the minimum {min}" + ) + if max is not None and result > max: + if self.boundary is LimitExceededPolicy.CLAMP: + result = max + else: + assert self.boundary is LimitExceededPolicy.ERROR + raise ValueError( + f"result of sum({left}, {right}) = {result}, which is " + f"greater than the maximum {max}" + ) + return result + + +def decode_number_value( + entry: MockKvDbEntry | KvWriteValue | KvValue, +) -> tuple[ValueEncoding, int | float]: + if entry.encoding == ValueEncoding.VE_LE64: + return ValueEncoding.VE_LE64, KvU64(entry.data).value + elif entry.encoding == ValueEncoding.VE_V8: + value = v8serialize.loads(entry.data) + if not isinstance(value, (int, float)): + raise 
ValueError("entry's value is not a V8-encoded BigInt or Number") + return ValueEncoding.VE_V8, value + else: + raise ValueError("entry value is not an LE64 or V8-encoded BigInt or Number") + + +def decode_v8_number(data: bytes) -> int | float: + try: + value = v8_decoder.decodes(data) + except v8serialize.V8SerializeError as e: + raise ValueError("data is not a valid V8-serialized value") from e + if not isinstance(value, (int, float)): + raise ValueError("V8-serialized value is not a BigInt or Number") + return value + + +def encode_number_value(value: int | float, encoding: ValueEncoding) -> bytes: + if encoding == ValueEncoding.VE_V8: + return bytes(v8_bigint_encoder.encode(value)) + elif encoding == ValueEncoding.VE_LE64: + if isinstance(value, float): + raise TypeError("Cannot encode float as LE64") + return KvU64(value).to_bytes() + raise ValueError(f"encoding is not LE64 or V8: {encoding}") + + +def encode_kv_write_value(value: object, expires_at_ms: int = 0) -> KvWriteValue: if isinstance(value, KvU64): - return KvValue(data=bytes(value), encoding=ValueEncoding.VE_LE64) + return KvWriteValue( + data=bytes(value), + encoding=ValueEncoding.VE_LE64, + expire_at_ms=expires_at_ms, + ) elif isinstance(value, bytes): - return KvValue(data=value, encoding=ValueEncoding.VE_BYTES) + return KvWriteValue( + data=value, encoding=ValueEncoding.VE_BYTES, expire_at_ms=expires_at_ms + ) else: - return KvValue(data=v8serialize.dumps(value), encoding=ValueEncoding.VE_V8) + return KvWriteValue( + data=v8serialize.dumps(value), + encoding=ValueEncoding.VE_V8, + expire_at_ms=expires_at_ms, + ) + + +def decode_enqueue_message(enqueue: Enqueue) -> MockKvDbMessage: + try: + payload_value = v8_decoder.decodes(enqueue.payload) + except v8serialize.V8SerializeError as e: + raise ValueError("Enqueue payload is not a valid V8-encoded value") from e + keys_if_undelivered = list[KvKey]() + for k in enqueue.keys_if_undelivered: + try: + 
keys_if_undelivered.append(KvKey.from_kv_key_bytes(k)) + except ValueError as e: + raise ValueError( + f"Enqueue keys_if_undelivered contains invalid key: {k!r}" + ) from e + return MockKvDbMessage( + payload=payload_value, + backoff_schedule=list(enqueue.backoff_schedule), + deadline_ms=enqueue.deadline_ms, + keys_if_undelivered=keys_if_undelivered, + ) def add_entries( @@ -171,7 +596,7 @@ def add_entries( version = VersionStamp(db.next_version) encoded_entries = [ - (pack_key(key), encode_protobuf_kv_value(value)) for (key, value) in entries + (pack_key(key), encode_kv_write_value(value)) for (key, value) in entries ] db.extend(encoded_entries) return version From 3a21ea0996fbf7759fb61e3dfacf2baed4c35ff8 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Fri, 11 Oct 2024 06:47:51 +0000 Subject: [PATCH 06/52] feat: implement datapath.atomic_write() This is the low-level API to invoke Atomic Write requests without any hand-holding. --- src/denokv/datapath.py | 196 +++++++++++- test/test_datapath.py | 669 +++++++++++++++++++++++++++++++++++------ 2 files changed, 764 insertions(+), 101 deletions(-) diff --git a/src/denokv/datapath.py b/src/denokv/datapath.py index fd8372a..512cdae 100644 --- a/src/denokv/datapath.py +++ b/src/denokv/datapath.py @@ -18,16 +18,22 @@ from google.protobuf.message import Error as ProtobufMessageError from v8serialize import Decoder +from denokv._datapath_pb2 import AtomicWrite +from denokv._datapath_pb2 import AtomicWriteOutput +from denokv._datapath_pb2 import AtomicWriteStatus +from denokv._datapath_pb2 import Check from denokv._datapath_pb2 import KvEntry from denokv._datapath_pb2 import ReadRange from denokv._datapath_pb2 import SnapshotRead from denokv._datapath_pb2 import SnapshotReadOutput from denokv._datapath_pb2 import SnapshotReadStatus from denokv._datapath_pb2 import ValueEncoding +from denokv._pycompat.typing import AbstractSet from denokv._pycompat.typing import Awaitable from denokv._pycompat.typing import Callable from 
denokv._pycompat.typing import Container from denokv._pycompat.typing import Final +from denokv._pycompat.typing import Iterable from denokv._pycompat.typing import Protocol from denokv._pycompat.typing import Type from denokv._pycompat.typing import TypeAlias @@ -174,6 +180,50 @@ class RequestUnsuccessful(DataPathDenoKvError): pass +@dataclass(init=False) +class CheckFailure(DataPathDenoKvError): + """ + The KV server could not complete an Atomic Write because of a concurrent change. + + This is an expected response to Atomic Write requests that occurs when one + or more of the checks an Atomic Write is conditional on are found to not + hold at the point that the database attempts to commit the write, because + another Atomic Write has written new version(s) of the key(s) referenced by + the check(s). The client must re-read the keys it was attempting to write, + and submit a new Atomic Write if necessary that reflects the latest state of + the keys. + """ + + all_checks: tuple[Check, ...] + """All of the Checks sent with the AtomicWrite.""" + failed_check_indexes: AbstractSet[int] + """ + The indexes of Checks in all_checks keys whose versionstamp check failed. + + The set is sorted with ascending iteration order. 
+ """ + + def __init__( + self, + message: str, + all_checks: Iterable[Check], + failed_check_indexes: Iterable[int], + *args: object, + endpoint: EndpointInfo, + ) -> None: + super().__init__(message, *args, endpoint=endpoint, auto_retry=AutoRetry.NEVER) + + self.all_checks = tuple(all_checks) + if len(self.all_checks) == 0: + raise ValueError("all_checks is empty") + ordered_indexes = sorted(failed_check_indexes) + if len(ordered_indexes) == 0: + raise ValueError("failed_check_indexes is empty") + if ordered_indexes[0] < 0 or ordered_indexes[-1] >= len(self.all_checks): + raise IndexError("failed_check_indexes contains out-of-bounds index") + self.failed_check_indexes = {i: True for i in ordered_indexes}.keys() + + DataPathError: TypeAlias = Union[ EndpointNotUsable, RequestUnsuccessful, ResponseUnsuccessful, ProtocolViolation ] @@ -181,7 +231,7 @@ class RequestUnsuccessful(DataPathDenoKvError): class _DataPathRequestKind(Enum): SnapshotRead = "snapshot_read" - SnapshotWrite = "snapshot_write" + AtomicWrite = "atomic_write" Watch = "watch" @@ -373,6 +423,150 @@ async def snapshot_read( return Ok(read_output) +AtomicWriteResult: TypeAlias = Result[bytes, Union[CheckFailure, DataPathError]] + + +async def atomic_write( + *, + session: aiohttp.ClientSession, + meta: DatabaseMetadata, + endpoint: EndpointInfo, + write: AtomicWrite, +) -> AtomicWriteResult: + """ + Perform a Data Path Atomic Write request against a database endpoint. + + The endpoint must have strong consistency. The write is conditional on the + checks of the provided AtomicWrite passing. Callers must expect to need to + retry a write when these checks are not satisfied due to another write + having modified a checked key. The result is an Err containing a + [CheckFailure](`denokv.datapath.CheckFailure`) when checks fail. + + When the write succeeds, the return value is the 10-byte versionstamp of the + committed version. 
+ + The request does not retry on error conditions, the caller is responsible + for retrying if they wish. The Err results report whether retries are + permitted by the Data Path protocol spec using their `auto_retry: AutoRetry` + field. + + Returns + ------- + Ok[bytes]: + 10-byte versionstamp when the write succeeds + Err[CheckFailure]: + When one or more of the AtomicWrite's checks are not satisfied. + Err[ProtocolViolation]: + When the endpoint sends an unexpected response violating the protocol + spec. + Err[RequestUnsuccessful]: + When the request cannot be sent, e.g. due to a network error. + Err[ResponseUnsuccessful]: + When the request is not handled successfully by the endpoint, e.g. due + to a the service being unavailable. + """ + if endpoint.consistency is not ConsistencyLevel.STRONG: + raise ValueError( + f"endpoints used with atomic_write must be " + f"{ConsistencyLevel.STRONG!r}: {endpoint}" + ) + + result = await _datapath_request( + kind=_DataPathRequestKind.AtomicWrite, + session=session, + meta=meta, + endpoint=endpoint, + request_body=write.SerializeToString(), + handle_response=_response_body_bytes, + ) + if isinstance(result, Err): + return result + response_bytes = result.value + + try: + write_output = AtomicWriteOutput.FromString(response_bytes) + except ProtobufMessageError as e: + err = ProtocolViolation( + "Server responded to Data Path request with invalid AtomicWriteOutput", + data=response_bytes, + endpoint=endpoint, + ) + err.__cause__ = e + return Err(err) + + if write_output.status == AtomicWriteStatus.AW_SUCCESS: + if len(write_output.failed_checks) != 0: + return Err( + ProtocolViolation( + "Server responded to Data Path Atomic Write with " + "SUCCESS containing failed checks", + data=write_output, + endpoint=endpoint, + ) + ) + if len(write_output.versionstamp) != 10: + return Err( + ProtocolViolation( + "Server responded to Data Path Atomic Write with " + "SUCCESS containing an invalid versionstamp", + data=write_output, + 
endpoint=endpoint, + ) + ) + return Ok(write_output.versionstamp) + elif write_output.status == AtomicWriteStatus.AW_CHECK_FAILURE: + try: + return Err( + CheckFailure( + "Not all checks required by the Atomic Write passed", + all_checks=write.checks, + failed_check_indexes=write_output.failed_checks, + endpoint=endpoint, + ) + ) + except IndexError as e: + err = ProtocolViolation( + "Server responded to Data Path Atomic Write with " + "CHECK_FAILURE referencing out-of-bounds check index", + data=write_output, + endpoint=endpoint, + ) + err.__cause__ = e + return Err(err) + except ValueError as e: + err = ProtocolViolation( + "Server responded to Data Path Atomic Write with " + "CHECK_FAILURE containing no failed checks", + data=write_output, + endpoint=endpoint, + ) + err.__cause__ = e + return Err(err) + elif write_output.status == AtomicWriteStatus.AW_WRITE_DISABLED: + return Err( + EndpointNotUsable( + "Server responded to Data Path request indicating it is cannot " + "write this database", + endpoint=endpoint, + reason=EndpointNotUsableReason.DISABLED, + ) + ) + else: + msg = ( + "UNSPECIFIED" + if write_output.status == AtomicWriteStatus.AW_UNSPECIFIED + else f"unknown: {write_output.status}" + ) + return Err( + ProtocolViolation( + f"Server responded to Data Path Atomic Write request with " + f"status {msg}", + data=write_output, + endpoint=endpoint, + ) + ) + + def is_kv_key_tuple(tup: object) -> TypeGuard[KvKeyTuple]: """Check if a tuple only contains valid KV key tuple type values.""" return isinstance(tup, tuple) and all( diff --git a/test/test_datapath.py b/test/test_datapath.py index 8ab2248..6bbb167 100644 --- a/test/test_datapath.py +++ b/test/test_datapath.py @@ -13,8 +13,10 @@ import v8serialize from aiohttp import web from aiohttp.test_utils import TestClient as _TestClient +from aiohttp.typedefs import Handler from fdb.tuple import pack from fdb.tuple import unpack +from google.protobuf.message import Message from hypothesis import example from 
hypothesis import given from hypothesis import strategies as st @@ -22,7 +24,14 @@ from yarl import URL from denokv import datapath +from denokv._datapath_pb2 import AtomicWrite +from denokv._datapath_pb2 import AtomicWriteOutput +from denokv._datapath_pb2 import AtomicWriteStatus +from denokv._datapath_pb2 import Check from denokv._datapath_pb2 import KvEntry as ProtobufKvEntry +from denokv._datapath_pb2 import KvValue +from denokv._datapath_pb2 import Mutation +from denokv._datapath_pb2 import MutationType from denokv._datapath_pb2 import ReadRange from denokv._datapath_pb2 import ReadRangeOutput from denokv._datapath_pb2 import SnapshotRead @@ -33,13 +42,16 @@ from denokv._pycompat.typing import Callable from denokv._pycompat.typing import Final from denokv._pycompat.typing import Mapping +from denokv._pycompat.typing import Sequence from denokv._pycompat.typing import TypeAlias +from denokv._pycompat.typing import TypeVar from denokv._pycompat.typing import cast from denokv.auth import ConsistencyLevel from denokv.auth import DatabaseMetadata from denokv.auth import EndpointInfo from denokv.datapath import KV_KEY_PIECE_TYPES from denokv.datapath import AutoRetry +from denokv.datapath import CheckFailure from denokv.datapath import DataPathDenoKvError from denokv.datapath import EndpointNotUsable from denokv.datapath import EndpointNotUsableReason @@ -48,6 +60,8 @@ from denokv.datapath import ProtocolViolation from denokv.datapath import RequestUnsuccessful from denokv.datapath import ResponseUnsuccessful +from denokv.datapath import _DataPathRequestKind +from denokv.datapath import atomic_write from denokv.datapath import increment_packed_key from denokv.datapath import is_any_kv_key from denokv.datapath import is_kv_key_tuple @@ -62,15 +76,20 @@ from denokv.kv_keys import KvKey from denokv.result import Err from denokv.result import Ok +from denokv.result import Result +from denokv.result import is_ok from test.denokv_testing import MockKvDb from 
test.denokv_testing import add_entries from test.denokv_testing import nextafter from test.denokv_testing import unsafe_parse_protobuf_kv_entry +from test.denokv_testing import v8_bigint_encoder TestClient: TypeAlias = _TestClient[web.Request, web.Application] pytest_mark_asyncio = pytest.mark.asyncio() +MessageT = TypeVar("MessageT", bound=Message) + @pytest.fixture def mock_db() -> MockKvDb: @@ -101,7 +120,7 @@ def get_server_version(request: web.Request) -> Literal[1, 2, 3]: raise AssertionError("handler is not registered at /v[123]/ URL path") return cast(Literal[1, 2, 3], version) - async def strong_snapshot_read(request: web.Request) -> web.Response: + def validate_request(request: web.Request) -> None: server_version = get_server_version(request) if request.method != "POST": @@ -131,18 +150,27 @@ async def strong_snapshot_read(request: web.Request) -> web.Response: f"talking to a v{server_version} server" ) from None - req_body_bytes = await request.read() + def parse_protobuf_body( + body_bytes: bytes, message_type: type[MessageT] + ) -> MessageT: + message = message_type() try: - read = SnapshotRead() - count = read.ParseFromString(req_body_bytes) - if len(req_body_bytes) != count: + count = message.ParseFromString(body_bytes) + if len(body_bytes) != count: raise ValueError( - f"{len(req_body_bytes) - count} trailing bytes after SnapshotRead" + f"{len(body_bytes) - count} trailing bytes after " + f"{message_type.__name__}" ) except Exception as e: raise web.HTTPBadRequest( - body=f"body is not a valid SnapshotRead message: {e}" + body=f"body is not a valid {message_type.__name__} message: {e}" ) from e + return message + + # Valid snapshot_read handler + async def strong_snapshot_read(request: web.Request) -> web.Response: + validate_request(request) + read = parse_protobuf_body(await request.read(), SnapshotRead) read_result = SnapshotReadOutput( status=SnapshotReadStatus.SR_SUCCESS, @@ -155,6 +183,24 @@ async def strong_snapshot_read(request: 
web.Request) -> web.Response: body=read_result.SerializeToString(), ) + # Valid atomic_write handler + async def atomic_write(request: web.Request) -> web.Response: + validate_request(request) + + write = parse_protobuf_body(await request.read(), AtomicWrite) + + try: + write_result = mock_db.atomic_write(write) + except ValueError as e: + raise web.HTTPBadRequest(body=f"SnapshotWrite is not valid: {e}") from e + + return web.Response( + status=200, + content_type="application/x-protobuf", + body=write_result.SerializeToString(), + ) + + # Generic Data Path errors async def violation_2xx_text_body(request: web.Request) -> web.Response: """Only 200, not 2xx is the permitted successful response status.""" return web.Response( @@ -197,6 +243,7 @@ async def violation_invalid_protobuf_body(request: web.Request) -> web.Response: body=b"\x00foo", ) + # Invalid snapshot_read handlers async def unusable_disabled_via_read_disabled(request: web.Request) -> web.Response: return web.Response( status=200, @@ -255,23 +302,117 @@ async def violation_wrong_ranges(request: web.Request) -> web.Response: ).SerializeToString(), ) + # Invalid atomic_write handlers + async def violation_atomic_write_success_with_failed_checks( + request: web.Request, + ) -> web.Response: + write = AtomicWrite() + write.ParseFromString(await request.read()) + assert len(write.checks) > 0, "write request must have at least one check" + + return web.Response( + status=200, + content_type="application/x-protobuf", + body=AtomicWriteOutput( + status=AtomicWriteStatus.AW_SUCCESS, + failed_checks=[0], + versionstamp=VersionStamp(0), + ).SerializeToString(), + ) + + async def violation_atomic_write_success_with_invalid_versionstamp( + request: web.Request, + ) -> web.Response: + return web.Response( + status=200, + content_type="application/x-protobuf", + body=AtomicWriteOutput( + status=AtomicWriteStatus.AW_SUCCESS, versionstamp=b"\xff" + ).SerializeToString(), + ) + + async def 
violation_atomic_write_check_failure_with_out_of_bounds_index( + request: web.Request, + ) -> web.Response: + write = AtomicWrite() + write.ParseFromString(await request.read()) + assert len(write.checks) > 0, "write request must have at least one check" + + return web.Response( + status=200, + content_type="application/x-protobuf", + body=AtomicWriteOutput( + status=AtomicWriteStatus.AW_CHECK_FAILURE, + failed_checks=[len(write.checks)], + ).SerializeToString(), + ) + + async def violation_atomic_write_check_failure_without_failed_checks( + request: web.Request, + ) -> web.Response: + write = AtomicWrite() + write.ParseFromString(await request.read()) + assert len(write.checks) > 0, "write request must have at least one check" + + return web.Response( + status=200, + content_type="application/x-protobuf", + body=AtomicWriteOutput( + status=AtomicWriteStatus.AW_CHECK_FAILURE + ).SerializeToString(), + ) + + async def violation_atomic_write_unspecified_status( + request: web.Request, + ) -> web.Response: + return web.Response( + status=200, + content_type="application/x-protobuf", + body=AtomicWriteOutput( + status=AtomicWriteStatus.AW_UNSPECIFIED + ).SerializeToString(), + ) + + async def violation_atomic_write_invalid_status( + request: web.Request, + ) -> web.Response: + return web.Response( + status=200, + content_type="application/x-protobuf", + body=AtomicWriteOutput( + status=max(AtomicWriteStatus.values()) + 1 # type: ignore[attr-defined] + ).SerializeToString(), + ) + + async def unusable_atomic_write(request: web.Request) -> web.Response: + return web.Response( + status=200, + content_type="application/x-protobuf", + body=AtomicWriteOutput( + status=AtomicWriteStatus.AW_WRITE_DISABLED + ).SerializeToString(), + ) + + def add_datapath_post(app: web.Application, path: str, handler: Handler) -> None: + assert path.startswith("/") and not path.endswith("/") + for req_kind in _DataPathRequestKind: + app.router.add_post(f"{path}/{req_kind.value}", handler) + app 
= web.Application() - app.router.add_post( - "/violation_2xx_text_body/snapshot_read", violation_2xx_text_body - ) - app.router.add_post( - "/violation_2xx_protobuf_body/snapshot_read", violation_2xx_protobuf_body - ) - app.router.add_post("/violation_307/snapshot_read", violation_307) - app.router.add_post("/errors_401/snapshot_read", errors_401) - app.router.add_post("/errors_503/snapshot_read", errors_503) - app.router.add_post( - "/violation_bad_content_type/snapshot_read", violation_bad_content_type - ) - app.router.add_post( - "/violation_invalid_protobuf_body/snapshot_read", - violation_invalid_protobuf_body, + + # Generic error endpoints + add_datapath_post(app, "/violation_2xx_text_body", violation_2xx_text_body) + + add_datapath_post(app, "/violation_2xx_protobuf_body", violation_2xx_protobuf_body) + add_datapath_post(app, "/violation_307", violation_307) + add_datapath_post(app, "/errors_401", errors_401) + add_datapath_post(app, "/errors_503", errors_503) + add_datapath_post(app, "/violation_bad_content_type", violation_bad_content_type) + add_datapath_post( + app, "/violation_invalid_protobuf_body", violation_invalid_protobuf_body ) + + # snapshot_read only error endpoints app.router.add_post( "/unusable_disabled_via_read_disabled/snapshot_read", unusable_disabled_via_read_disabled, @@ -292,9 +433,39 @@ async def violation_wrong_ranges(request: web.Request) -> web.Response: "/violation_wrong_ranges/snapshot_read", violation_wrong_ranges, ) + + # atomic_write only error endpoints + app.router.add_post( + "/success_with_failed_checks/atomic_write", + violation_atomic_write_success_with_failed_checks, + ) + app.router.add_post( + "/success_with_invalid_versionstamp/atomic_write", + violation_atomic_write_success_with_invalid_versionstamp, + ) + app.router.add_post( + "/check_failure_with_out_of_bounds_index/atomic_write", + violation_atomic_write_check_failure_with_out_of_bounds_index, + ) + app.router.add_post( + 
"/check_failure_without_failed_checks/atomic_write", + violation_atomic_write_check_failure_without_failed_checks, + ) + app.router.add_post("/unusable/atomic_write", unusable_atomic_write) + app.router.add_post( + "/unspecified_status/atomic_write", violation_atomic_write_unspecified_status + ) + app.router.add_post( + "/invalid_status/atomic_write", violation_atomic_write_invalid_status + ) + + # Working endpoints app.router.add_post("/v1/consistency/strong/snapshot_read", strong_snapshot_read) app.router.add_post("/v2/consistency/strong/snapshot_read", strong_snapshot_read) app.router.add_post("/v3/consistency/strong/snapshot_read", strong_snapshot_read) + app.router.add_post("/v1/consistency/strong/atomic_write", atomic_write) + app.router.add_post("/v2/consistency/strong/atomic_write", atomic_write) + app.router.add_post("/v3/consistency/strong/atomic_write", atomic_write) return app @@ -332,66 +503,109 @@ def make_database_metadata_for_endpoint( @pytest.mark.parametrize( - "path, mk_error", + "datapath_request_fn", [ - ( - "/violation_2xx_text_body", - lambda endpoint: ResponseUnsuccessful( - "Server responded to Data Path request with unexpected HTTP status", - status=201, - body_text="Strange behaviour.", - auto_retry=AutoRetry.NEVER, - endpoint=endpoint, - ), + pytest.param( + functools.partial(snapshot_read, read=SnapshotRead()), id="snapshot_read" ), - ( - "/violation_2xx_protobuf_body", - lambda endpoint: ResponseUnsuccessful( - "Server responded to Data Path request with unexpected HTTP status", - status=201, - body_text="Response content-type: application/x-protobuf", - auto_retry=AutoRetry.NEVER, - endpoint=endpoint, - ), + pytest.param( + functools.partial(atomic_write, write=AtomicWrite()), id="atomic_write" ), - ( - "/violation_307", - lambda endpoint: ResponseUnsuccessful( - "Server responded to Data Path request with unexpected HTTP status", - status=307, - body_text="testdb: redirecting to /foo", - auto_retry=AutoRetry.NEVER, - 
endpoint=endpoint, - ), + ], +) +@pytest_mark_asyncio +async def test_datapath_request_function__handles_network_error( + client: TestClient, + unused_tcp_port_factory: Callable[[], int], + datapath_request_fn: functools.partial[Awaitable[Result[object, object]]], +) -> None: + server_url = client.make_url("/") + server_url = server_url.with_port(unused_tcp_port_factory()) + + meta, endpoint = make_database_metadata_for_endpoint(endpoint_url=server_url) + + # will fail to connect to URL with nothing listening on the port + result = await datapath_request_fn( + session=client.session, + meta=meta, + endpoint=endpoint, + ) + assert isinstance(result, Err) + assert result.error == RequestUnsuccessful( + "Failed to make Data Path HTTP request to KV server", + endpoint=endpoint, + auto_retry=AutoRetry.AFTER_BACKOFF, + ) + + +generic_datapath_unsuccessful_response_params: Sequence[ + tuple[str, Callable[[EndpointInfo], DataPathDenoKvError]] +] = [ + ( + "/violation_2xx_text_body", + lambda endpoint: ResponseUnsuccessful( + "Server responded to Data Path request with unexpected HTTP status", + status=201, + body_text="Strange behaviour.", + auto_retry=AutoRetry.NEVER, + endpoint=endpoint, ), - ( - "/errors_401", - lambda endpoint: ResponseUnsuccessful( - "Server rejected Data Path request indicating client error", - status=401, - body_text="testdb: Unauthorized", - auto_retry=AutoRetry.NEVER, - endpoint=endpoint, - ), + ), + ( + "/violation_2xx_protobuf_body", + lambda endpoint: ResponseUnsuccessful( + "Server responded to Data Path request with unexpected HTTP status", + status=201, + body_text="Response content-type: application/x-protobuf", + auto_retry=AutoRetry.NEVER, + endpoint=endpoint, ), - ( - "/errors_503", - lambda endpoint: ResponseUnsuccessful( - "Server failed to respond to Data Path request indicating server error", - status=503, - body_text="testdb: Unavailable", - auto_retry=AutoRetry.AFTER_BACKOFF, - endpoint=endpoint, - ), + ), + ( + "/violation_307", + 
lambda endpoint: ResponseUnsuccessful( + "Server responded to Data Path request with unexpected HTTP status", + status=307, + body_text="testdb: redirecting to /foo", + auto_retry=AutoRetry.NEVER, + endpoint=endpoint, ), - ( - "/violation_bad_content_type", - lambda endpoint: ProtocolViolation( - "response content-type is not application/x-protobuf: text/plain", - data="text/plain", - endpoint=endpoint, - ), + ), + ( + "/errors_401", + lambda endpoint: ResponseUnsuccessful( + "Server rejected Data Path request indicating client error", + status=401, + body_text="testdb: Unauthorized", + auto_retry=AutoRetry.NEVER, + endpoint=endpoint, ), + ), + ( + "/errors_503", + lambda endpoint: ResponseUnsuccessful( + "Server failed to respond to Data Path request indicating server error", + status=503, + body_text="testdb: Unavailable", + auto_retry=AutoRetry.AFTER_BACKOFF, + endpoint=endpoint, + ), + ), + ( + "/violation_bad_content_type", + lambda endpoint: ProtocolViolation( + "response content-type is not application/x-protobuf: text/plain", + data="text/plain", + endpoint=endpoint, + ), + ), +] + + +@pytest.mark.parametrize( + "path, mk_error", + [ + *generic_datapath_unsuccessful_response_params, ( "/violation_invalid_protobuf_body", lambda endpoint: ProtocolViolation( @@ -470,31 +684,6 @@ async def test_snapshot_read__handles_unsuccessful_responses( assert result.error == error -@pytest_mark_asyncio -async def test_snapshot_read__handles_network_error( - client: TestClient, unused_tcp_port_factory: Callable[[], int] -) -> None: - server_url = client.make_url("/") - server_url = server_url.with_port(unused_tcp_port_factory()) - - meta, endpoint = make_database_metadata_for_endpoint(endpoint_url=server_url) - read = SnapshotRead(ranges=[]) - - # will fail to connect to URL with nothing listening on the port - result = await snapshot_read( - session=client.session, - meta=meta, - endpoint=endpoint, - read=read, - ) - assert isinstance(result, Err) - assert result.error == 
RequestUnsuccessful( - "Failed to make Data Path HTTP request to KV server", - endpoint=endpoint, - auto_retry=AutoRetry.AFTER_BACKOFF, - ) - - @pytest.mark.parametrize( "read_ranges, result_ranges", [ @@ -635,6 +824,232 @@ async def test_snapshot_read__reads_expected_values( assert actual_result_ranges == expected_result_ranges +@pytest_mark_asyncio +async def test_atomic_write__raises_when_given_endpoint_without_strong_consistency( + client: TestClient, +) -> None: + # this is considered an avoidable programmer error, so it raises + meta, eventual_endpoint = make_database_metadata_for_endpoint( + URL("https://example/"), endpoint_consistency=ConsistencyLevel.EVENTUAL + ) + with pytest.raises( + ValueError, + match=r"endpoints used with atomic_write must be " + r"", + ): + await atomic_write( + session=client.session, + meta=meta, + endpoint=eventual_endpoint, + write=AtomicWrite(), + ) + + +@pytest.mark.parametrize( + "path, mk_error", + [ + *generic_datapath_unsuccessful_response_params, + ( + "/violation_invalid_protobuf_body", + lambda endpoint: ProtocolViolation( + "Server responded to Data Path request with invalid " + "AtomicWriteOutput", + data=b"\x00foo", + endpoint=endpoint, + ), + ), + ( + "/success_with_failed_checks", + lambda endpoint: ProtocolViolation( + "Server responded to Data Path Atomic Write with SUCCESS " + "containing failed checks", + data=AtomicWriteOutput( + status=AtomicWriteStatus.AW_SUCCESS, + failed_checks=[0], + versionstamp=VersionStamp(0), + ), + endpoint=endpoint, + ), + ), + ( + "/success_with_invalid_versionstamp", + lambda endpoint: ProtocolViolation( + "Server responded to Data Path Atomic Write with SUCCESS " + "containing an invalid versionstamp", + data=AtomicWriteOutput( + status=AtomicWriteStatus.AW_SUCCESS, + versionstamp=b"\xff", + ), + endpoint=endpoint, + ), + ), + ( + "/check_failure_with_out_of_bounds_index", + lambda endpoint: ProtocolViolation( + "Server responded to Data Path Atomic Write with CHECK_FAILURE " + 
"referencing out-of-bounds check index", + data=AtomicWriteOutput( + status=AtomicWriteStatus.AW_CHECK_FAILURE, + failed_checks=[1], + ), + endpoint=endpoint, + ), + ), + ( + "/check_failure_without_failed_checks", + lambda endpoint: ProtocolViolation( + "Server responded to Data Path Atomic Write with CHECK_FAILURE " + "containing no failed checks", + data=AtomicWriteOutput(status=AtomicWriteStatus.AW_CHECK_FAILURE), + endpoint=endpoint, + ), + ), + ( + "/unspecified_status", + lambda endpoint: ProtocolViolation( + "Server responded to Data Path Atomic Write request " + "with status UNSPECIFIED", + data=AtomicWriteOutput(status=AtomicWriteStatus.AW_UNSPECIFIED), + endpoint=endpoint, + ), + ), + ( + "/invalid_status", + lambda endpoint: ProtocolViolation( + "Server responded to Data Path Atomic Write request " + "with status unknown: 6", + data=AtomicWriteOutput(status=6), # type: ignore[arg-type] + endpoint=endpoint, + ), + ), + ( + "/unusable", + lambda endpoint: EndpointNotUsable( + "Server responded to Data Path request indicating it is cannot " + "write this database", + reason=EndpointNotUsableReason.DISABLED, + endpoint=endpoint, + ), + ), + ], +) +@pytest_mark_asyncio +async def test_atomic_write__handles_unsuccessful_responses( + client: TestClient, + path: str, + mk_error: Callable[[EndpointInfo], DataPathDenoKvError], +) -> None: + server_url = client.make_url(path) + meta, endpoint = make_database_metadata_for_endpoint(endpoint_url=server_url) + error = mk_error(endpoint) + assert isinstance(error, DataPathDenoKvError) + + result = await atomic_write( + session=client.session, + meta=meta, + endpoint=endpoint, + write=AtomicWrite( + checks=[Check(key=pack_key(("x",)), versionstamp=bytes(VersionStamp(0)))] + ), + ) + assert isinstance(result, Err) + assert result.error == error + + +@pytest.fixture +def example_entries_write() -> Mapping[KvKeyTuple, object]: + return {("bigint", 1): 10} + + +# There's not really much point in testing many successful 
mutations here, as +# our atomic_write() function is just passing along the encoded protobuf data +# without doing anything to it — we're just testing the db implementation if we +# were to test lots of things here. Error cases are where all the work is. +@pytest.mark.parametrize( + "write, read_ranges, result_ranges", + [ + pytest.param(AtomicWrite(), [], [], id="empty"), + pytest.param( + AtomicWrite( + mutations=[ + Mutation( + key=pack_key(("bigint", 1)), + value=KvValue( + data=bytes(v8_bigint_encoder.encode(20)), + encoding=ValueEncoding.VE_V8, + ), + mutation_type=MutationType.M_SET, + ) + ] + ), + [ + ReadRange( + start=pack_key(("bigint", 1)), end=pack_key(("bigint", 2)), limit=1 + ), + ], + [[(KvKey("bigint", 1), 20)]], + id="set", + ), + pytest.param( + AtomicWrite( + mutations=[ + Mutation( + key=pack_key(("bigint", 1)), + value=KvValue( + data=bytes(v8_bigint_encoder.encode(20)), + encoding=ValueEncoding.VE_V8, + ), + mutation_type=MutationType.M_SUM, + ) + ] + ), + [ + ReadRange( + start=pack_key(("bigint", 1)), end=pack_key(("bigint", 2)), limit=1 + ), + ], + [[(KvKey("bigint", 1), 30)]], + id="sum", + ), + ], +) +@pytest.mark.parametrize("version", [1, 2, 3]) +@pytest_mark_asyncio +async def test_atomic_write__writes_expected_values( + client: TestClient, + mock_db: MockKvDb, + example_entries_write: Mapping[KvKeyTuple, object], + write: AtomicWrite, + read_ranges: list[ReadRange], + result_ranges: list[list[tuple[KvKeyTuple, object]]], + version: Literal[1, 2, 3], +) -> None: + server_url = client.make_url(f"/v{version}/consistency/strong/") + meta, endpoint = make_database_metadata_for_endpoint( + endpoint_url=server_url, version=version + ) + add_entries(mock_db, example_entries_write) + + write_result = await atomic_write( + session=client.session, meta=meta, endpoint=endpoint, write=write + ) + + assert is_ok(write_result) + write_ver = VersionStamp(write_result.value) + + actual_result_ranges = [ + [unsafe_parse_protobuf_kv_entry(raw_entry) 
for raw_entry in res_range.values] + for res_range in ( + mock_db.snapshot_read_range(read=range) for range in read_ranges + ) + ] + expected_result_ranges = [ + [KvEntry(key, value, versionstamp=write_ver) for (key, value) in entries] + for entries in result_ranges + ] + assert actual_result_ranges == expected_result_ranges + + @pytest.mark.parametrize( "raw_entry, decoded", [ @@ -1056,3 +1471,57 @@ def test_is_any_kv_key() -> None: assert is_any_kv_key(("a", 1, 1.0, True, b"b")) assert not is_any_kv_key([]) assert not is_any_kv_key(((),)) + + +@pytest.fixture +def example_endpoint() -> EndpointInfo: + _, endpoint = meta_endpoint( + make_database_metadata(endpoints=URL("https://example.com")) + ) + return endpoint + + +def test_CheckFailure(example_endpoint: EndpointInfo) -> None: + checks = [ + Check(key=bytes(KvKey(f"a{i}")), versionstamp=bytes(VersionStamp(i))) + for i in range(4) + ] + msg = "Not all checks required by the Atomic Write passed" + e = CheckFailure( + msg, + all_checks=iter(checks), + failed_check_indexes=[3, 0, 2], + endpoint=example_endpoint, + ) + assert e.all_checks == tuple(checks) + assert e.failed_check_indexes == {0, 2, 3} + # failed_check_indexes are ordered ascending + assert list(e.failed_check_indexes) == [0, 2, 3] + assert e.endpoint is example_endpoint + assert msg in str(e) + + +def test_CheckFailure__validates_constructor_args( + example_endpoint: EndpointInfo, +) -> None: + checks = [Check(key=bytes(KvKey("a")), versionstamp=bytes(VersionStamp(1)))] + + with pytest.raises(ValueError, match=r"all_checks is empty"): + CheckFailure( + "Foo", all_checks=[], failed_check_indexes=[], endpoint=example_endpoint + ) + + with pytest.raises(ValueError, match=r"failed_check_indexes is empty"): + CheckFailure( + "Foo", all_checks=checks, failed_check_indexes=[], endpoint=example_endpoint + ) + + with pytest.raises( + IndexError, match=r"failed_check_indexes contains out-of-bounds index" + ): + CheckFailure( + "Foo", + all_checks=checks, + 
failed_check_indexes=[5], + endpoint=example_endpoint, + ) From fc49e55b84831d4a6eb54211f737e02d2a854bd9 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Sat, 12 Oct 2024 08:26:00 +0000 Subject: [PATCH 07/52] feat: add EvalEnumRepr Enum mixin It makes Enums use 'EnumName.FIELD' as the repr rather than the default syntax with < ... >. --- src/denokv/_pycompat/enum.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/src/denokv/_pycompat/enum.py b/src/denokv/_pycompat/enum.py index 36b38ff..f570813 100644 --- a/src/denokv/_pycompat/enum.py +++ b/src/denokv/_pycompat/enum.py @@ -1,6 +1,7 @@ from __future__ import annotations import sys +from enum import Enum from enum import EnumMeta from enum import Flag from enum import IntFlag @@ -65,3 +66,19 @@ def __str__(self) -> str: else: from enum import IntEnum as IntEnum # noqa: F401 # re-export + + +class EvalEnumRepr(Enum): + """ + An Enum mixin that uses 'EnumName.FIELD' as the repr. + + Example + ------- + >>> class EnumName(EvalEnumRepr, Enum): + ... FIELD = 'a' + >>> EnumName.FIELD + EnumName.FIELD + """ + + def __repr__(self) -> str: + return f"{type(self).__name__}.{self.name}" From f8af2b7c4fe96cf143d02e11d3e93b8fbf1ac436 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Sun, 2 Feb 2025 15:13:27 +0000 Subject: [PATCH 08/52] refactor: move standalone KV value types into submodule KvEntry, VersionStamp and KvU64 are now in denokv._kv_values. These types are needed by other submodules that I'd like to separate from the denokv.kv module. 
--- src/denokv/__init__.py | 6 +- src/denokv/_kv_values.py | 143 +++++++++++++++++++++++++++++++++++++++ src/denokv/kv.py | 129 +---------------------------------- test/denokv_testing.py | 6 +- test/test__kv_values.py | 45 ++++++++++++ test/test_datapath.py | 6 +- test/test_kv.py | 43 +----------- 7 files changed, 202 insertions(+), 176 deletions(-) create mode 100644 src/denokv/_kv_values.py create mode 100644 test/test__kv_values.py diff --git a/src/denokv/__init__.py b/src/denokv/__init__.py index 1340fdb..ce13266 100644 --- a/src/denokv/__init__.py +++ b/src/denokv/__init__.py @@ -1,5 +1,8 @@ from __future__ import annotations +from denokv._kv_values import KvEntry as KvEntry +from denokv._kv_values import KvU64 as KvU64 +from denokv._kv_values import VersionStamp as VersionStamp from denokv.auth import ConsistencyLevel as ConsistencyLevel from denokv.auth import MetadataExchangeDenoKvError as MetadataExchangeDenoKvError from denokv.datapath import AnyKvKey as AnyKvKey @@ -13,10 +16,7 @@ from denokv.kv import CursorFormatType as CursorFormatType from denokv.kv import Kv as Kv from denokv.kv import KvCredentials as KvCredentials -from denokv.kv import KvEntry as KvEntry from denokv.kv import KvListOptions as KvListOptions -from denokv.kv import KvU64 as KvU64 from denokv.kv import ListKvEntry as ListKvEntry -from denokv.kv import VersionStamp as VersionStamp from denokv.kv import open_kv as open_kv from denokv.kv_keys import KvKey as KvKey diff --git a/src/denokv/_kv_values.py b/src/denokv/_kv_values.py new file mode 100644 index 0000000..f93ecfa --- /dev/null +++ b/src/denokv/_kv_values.py @@ -0,0 +1,143 @@ +from __future__ import annotations + +from binascii import unhexlify +from dataclasses import dataclass + +from denokv._pycompat.dataclasses import slots_if310 +from denokv._pycompat.typing import ClassVar +from denokv._pycompat.typing import Generic +from denokv._pycompat.typing import Self +from denokv._pycompat.typing import TypeVar +from 
denokv._pycompat.typing import TypeVarTuple +from denokv._pycompat.typing import Unpack +from denokv.datapath import AnyKvKeyT +from denokv.datapath import KvKeyPiece + +T = TypeVar("T", default=object) +# Note that the default arg doesn't seem to work with MyPy yet. The +# DefaultKvKey alias is what this should behave as when defaulted. +Pieces = TypeVarTuple("Pieces", default=Unpack[tuple[KvKeyPiece, ...]]) + + +@dataclass(frozen=True, **slots_if310()) +class KvEntry(Generic[AnyKvKeyT, T]): + """A value read from the Deno KV database, along with its key and version.""" + + key: AnyKvKeyT + value: T + versionstamp: VersionStamp + + +class VersionStamp(bytes): + r""" + A 20-hex-char / (10 byte) version identifier. + + This value represents the relative age of a KvEntry. A VersionStamp that + compares larger than another is newer. + + Examples + -------- + >>> VersionStamp(0xff << 16) + VersionStamp('00000000000000ff0000') + >>> int(VersionStamp('000000000000000000ff')) + 255 + >>> bytes(VersionStamp('00000000000000ff0000')) + b'\x00\x00\x00\x00\x00\x00\x00\xff\x00\x00' + >>> VersionStamp(b'\x00\x00\x00\x00\x00\x00\x00\xff\x00\x00') + VersionStamp('00000000000000ff0000') + >>> isinstance(VersionStamp(0), bytes) + True + >>> str(VersionStamp(0xff << 16)) + '00000000000000ff0000' + """ + + RANGE: ClassVar = range(0, 2**80) + + def __new__(cls, value: str | bytes | int) -> Self: + if isinstance(value, int): + if value not in VersionStamp.RANGE: + raise ValueError("value not in range for 80-bit unsigned int") + # Unlike most others, versionstamp uses big-endian as it needs to + # sort lexicographically as bytes. 
+            value = value.to_bytes(length=10, byteorder="big")
+        if isinstance(value, str):
+            try:
+                value = unhexlify(value)
+            except Exception:
+                value = b""
+            if len(value) != 10:
+                raise ValueError("value is not a 20 char hex string")
+        else:
+            if len(value) != 10:
+                raise ValueError("value is not 10 bytes long")
+        return bytes.__new__(cls, value)
+
+    def __index__(self) -> int:
+        return int.from_bytes(self, byteorder="big")
+
+    def __bytes__(self) -> bytes:
+        return self[:]
+
+    def __str__(self) -> str:
+        return self.hex()
+
+    def __repr__(self) -> str:
+        return f"{type(self).__name__}({str(self)!r})"
+
+
+@dataclass(frozen=True, **slots_if310())
+class KvU64:
+    """
+    A special int value that supports operations like `sum`, `max`, and `min`.
+
+    Notes
+    -----
+    This type is not an int subtype to avoid it being mistakenly flattened into
+    a regular int and losing its special meaning when written back to the DB.
+
+    Examples
+    --------
+    >>> KvU64(bytes([0, 0, 0, 0, 0, 0, 0, 0]))
+    KvU64(0)
+    >>> KvU64(bytes([1, 0, 0, 0, 0, 0, 0, 0]))
+    KvU64(1)
+    >>> KvU64(bytes([1, 1, 0, 0, 0, 0, 0, 0]))
+    KvU64(257)
+    >>> KvU64(2**64 - 1)
+    KvU64(18446744073709551615)
+    >>> KvU64(2**64)
+    Traceback (most recent call last):
+    ...
+    ValueError: value not in range for 64-bit unsigned int
+    >>> KvU64(-1)
+    Traceback (most recent call last):
+    ...
+ ValueError: value not in range for 64-bit unsigned int + """ + + RANGE: ClassVar = range(0, 2**64) + value: int + + def __init__(self, value: bytes | int) -> None: + if isinstance(value, bytes): + if len(value) != 8: + raise ValueError("value must be a 8 bytes") + value = int.from_bytes(value, byteorder="little") + elif isinstance(value, int): + if value not in KvU64.RANGE: + raise ValueError("value not in range for 64-bit unsigned int") + else: + raise TypeError("value must be 8 bytes or a 64-bit unsigned int") + object.__setattr__(self, "value", value) + + def __index__(self) -> int: + return self.value + + def __bytes__(self) -> bytes: + return self.to_bytes() + + def to_bytes(self) -> bytes: + return self.value.to_bytes(8, byteorder="little") + + def __repr__(self) -> str: + return f"{type(self).__name__}({self.value})" diff --git a/src/denokv/kv.py b/src/denokv/kv.py index cde6e88..5cdcdce 100644 --- a/src/denokv/kv.py +++ b/src/denokv/kv.py @@ -4,7 +4,6 @@ import weakref from base64 import urlsafe_b64decode from base64 import urlsafe_b64encode -from binascii import unhexlify from contextlib import AbstractAsyncContextManager from dataclasses import dataclass from dataclasses import field @@ -24,11 +23,13 @@ from denokv._datapath_pb2 import ReadRange from denokv._datapath_pb2 import SnapshotRead from denokv._datapath_pb2 import SnapshotReadOutput +from denokv._kv_values import KvEntry +from denokv._kv_values import KvU64 +from denokv._kv_values import VersionStamp from denokv._pycompat.dataclasses import slots_if310 from denokv._pycompat.typing import AsyncIterator from denokv._pycompat.typing import Awaitable from denokv._pycompat.typing import Callable -from denokv._pycompat.typing import ClassVar from denokv._pycompat.typing import Final from denokv._pycompat.typing import Generic from denokv._pycompat.typing import Iterable @@ -91,15 +92,6 @@ class KvListOptions(TypedDict, total=False): cursor_format_type: CursorFormatType | None -@dataclass(frozen=True, 
**slots_if310()) -class KvEntry(Generic[AnyKvKeyT, T]): - """A value read from the Deno KV database, along with its key and version.""" - - key: AnyKvKeyT - value: T - versionstamp: VersionStamp - - @dataclass(frozen=True, **slots_if310()) class ListKvEntry(KvEntry[AnyKvKeyT, T]): """ @@ -120,121 +112,6 @@ def cursor(self) -> str: return result.value -class VersionStamp(bytes): - r""" - A 20-hex-char / (10 byte) version identifier. - - This value represents the relative age of a KvEntry. A VersionStamp that - compares larger than another is newer. - - Examples - -------- - >>> VersionStamp(0xff << 16) - VersionStamp('00000000000000ff0000') - >>> int(VersionStamp('000000000000000000ff')) - 255 - >>> bytes(VersionStamp('00000000000000ff0000')) - b'\x00\x00\x00\x00\x00\x00\x00\xff\x00\x00' - >>> VersionStamp(b'\x00\x00\x00\x00\x00\x00\x00\xff\x00\x00') - VersionStamp('00000000000000ff0000') - >>> isinstance(VersionStamp(0), bytes) - True - >>> str(VersionStamp(0xff << 16)) - '00000000000000ff0000' - """ - - RANGE: ClassVar = range(0, 2**80) - - def __new__(cls, value: str | bytes | int) -> Self: - if isinstance(value, int): - if value not in VersionStamp.RANGE: - raise ValueError("value not in range for 80-bit unsigned int") - # Unlike most others, versionstamp uses big-endian as it needs to - # sort lexicographically as bytes. 
- value = value.to_bytes(length=10, byteorder="big") - if isinstance(value, str): - try: - value = unhexlify(value) - except Exception: - value = b"" - if len(value) != 10: - raise ValueError("value is not a 20 char hex string") - else: - if len(value) != 10: - raise ValueError("value is not 10 bytes long") - return bytes.__new__(cls, value) - - def __index__(self) -> int: - return int.from_bytes(self, byteorder="big") - - def __bytes__(self) -> bytes: - return self[:] - - def __str__(self) -> str: - return self.hex() - - def __repr__(self) -> str: - return f"{type(self).__name__}({str(self)!r})" - - -@dataclass(frozen=True, **slots_if310()) -class KvU64: - """ - An special int value that supports operations like `sum`, `max`, and `min`. - - Notes - ----- - This type is not an int subtype to avoid it being mistakenly flattened into - a regular int and loosing its special meaning when written back to the DB. - - Examples - -------- - >>> KvU64(bytes([0, 0, 0, 0, 0, 0, 0, 0])) - KvU64(0) - >>> KvU64(bytes([1, 0, 0, 0, 0, 0, 0, 0])) - KvU64(1) - >>> KvU64(bytes([1, 1, 0, 0, 0, 0, 0, 0])) - KvU64(257) - >>> KvU64(2**64 - 1) - KvU64(18446744073709551615) - >>> KvU64(2**64) - Traceback (most recent call last): - ... - ValueError: value not in range for 64-bit unsigned int - >>> KvU64(-1) - Traceback (most recent call last): - ... 
- ValueError: value not in range for 64-bit unsigned int - """ - - RANGE: ClassVar = range(0, 2**64) - value: int - - def __init__(self, value: bytes | int) -> None: - if isinstance(value, bytes): - if len(value) != 8: - raise ValueError("value must be a 8 bytes") - value = int.from_bytes(value, byteorder="little") - elif isinstance(value, int): - if value not in KvU64.RANGE: - raise ValueError("value not in range for 64-bit unsigned int") - else: - raise TypeError("value must be 8 bytes or a 64-bit unsigned int") - object.__setattr__(self, "value", value) - - def __index__(self) -> int: - return self.value - - def __bytes__(self) -> bytes: - return self.to_bytes() - - def to_bytes(self) -> bytes: - return self.value.to_bytes(8, byteorder="little") - - def __repr__(self) -> str: - return f"{type(self).__name__}({self.value})" - - @dataclass(frozen=True, **slots_if310()) class EndpointSelector: # Right now this is very simple, which is fine for the local SQLite-backed diff --git a/test/denokv_testing.py b/test/denokv_testing.py index d4709b7..d474d4a 100644 --- a/test/denokv_testing.py +++ b/test/denokv_testing.py @@ -29,6 +29,9 @@ from denokv._datapath_pb2 import ReadRange from denokv._datapath_pb2 import ReadRangeOutput from denokv._datapath_pb2 import ValueEncoding +from denokv._kv_values import KvEntry +from denokv._kv_values import KvU64 +from denokv._kv_values import VersionStamp from denokv._pycompat.dataclasses import slots_if310 from denokv._pycompat.protobuf import enum_name from denokv._pycompat.typing import Any @@ -49,11 +52,8 @@ from denokv.datapath import parse_protobuf_kv_entry from denokv.errors import InvalidCursor from denokv.kv import AnyCursorFormat -from denokv.kv import KvEntry -from denokv.kv import KvU64 from denokv.kv import LimitExceededPolicy from denokv.kv import ListContext -from denokv.kv import VersionStamp from denokv.kv_keys import KvKey from denokv.result import Err from denokv.result import Ok diff --git a/test/test__kv_values.py 
b/test/test__kv_values.py new file mode 100644 index 0000000..9b7305b --- /dev/null +++ b/test/test__kv_values.py @@ -0,0 +1,45 @@ +from __future__ import annotations + +from hypothesis import given +from hypothesis import strategies as st + +from denokv._kv_values import KvU64 +from denokv._kv_values import VersionStamp + + +@given(v=st.integers(min_value=0, max_value=2**80 - 1)) +def test_VersionStamp_init(v: int) -> None: + vs_int = VersionStamp(v) + assert int(vs_int) == v + assert VersionStamp(str(vs_int)) == vs_int + assert VersionStamp(bytes(vs_int)) == vs_int + assert bytes(vs_int) == vs_int + assert isinstance(vs_int, bytes) + + +@given(i=st.integers(min_value=0, max_value=2**64 - 1)) +def test_KvU64_init(i: int) -> None: + u64 = KvU64(i) + assert int(u64) == i + assert KvU64(bytes(u64)) == u64 + assert u64.to_bytes() == bytes(u64) + assert u64.to_bytes() == i.to_bytes(8, "little") + + +@given( + v1=st.integers(min_value=0, max_value=2**80 - 1), + v2=st.integers(min_value=0, max_value=2**80 - 1), +) +def test_VersionStamp_ordering(v1: int, v2: int) -> None: + vs1, vs2 = VersionStamp(v1), VersionStamp(v2) + if v1 < v2: + assert vs1 < vs2 + elif v1 > v2: + assert vs1 > vs2 + else: + assert vs1 == vs2 + + +def test_KVU64__bytes() -> None: + assert KvU64(bytes(KvU64(123456789))).value == 123456789 + assert KvU64(KvU64(123456789).to_bytes()).value == 123456789 diff --git a/test/test_datapath.py b/test/test_datapath.py index 6bbb167..139bc68 100644 --- a/test/test_datapath.py +++ b/test/test_datapath.py @@ -38,6 +38,9 @@ from denokv._datapath_pb2 import SnapshotReadOutput from denokv._datapath_pb2 import SnapshotReadStatus from denokv._datapath_pb2 import ValueEncoding +from denokv._kv_values import KvEntry +from denokv._kv_values import KvU64 +from denokv._kv_values import VersionStamp from denokv._pycompat.typing import Awaitable from denokv._pycompat.typing import Callable from denokv._pycompat.typing import Final @@ -70,9 +73,6 @@ from denokv.datapath import 
parse_protobuf_kv_entry from denokv.datapath import read_range_single from denokv.datapath import snapshot_read -from denokv.kv import KvEntry -from denokv.kv import KvU64 -from denokv.kv import VersionStamp from denokv.kv_keys import KvKey from denokv.result import Err from denokv.result import Ok diff --git a/test/test_kv.py b/test/test_kv.py index 2389c43..7f94d11 100644 --- a/test/test_kv.py +++ b/test/test_kv.py @@ -33,6 +33,8 @@ from denokv._datapath_pb2 import SnapshotReadOutput from denokv._datapath_pb2 import SnapshotReadStatus from denokv._datapath_pb2 import ValueEncoding +from denokv._kv_values import KvEntry +from denokv._kv_values import VersionStamp from denokv._pycompat.enum import StrEnum from denokv._pycompat.typing import Any from denokv._pycompat.typing import AsyncGenerator @@ -65,12 +67,9 @@ from denokv.kv import DatabaseMetadataCache from denokv.kv import EndpointSelector from denokv.kv import Kv -from denokv.kv import KvEntry from denokv.kv import KvFlags from denokv.kv import KvListOptions -from denokv.kv import KvU64 from denokv.kv import OpenKvFinalize -from denokv.kv import VersionStamp from denokv.kv import normalize_key from denokv.kv import open_kv from denokv.kv_keys import KvKey @@ -88,44 +87,6 @@ pytest_mark_asyncio = pytest.mark.asyncio() -@given(v=st.integers(min_value=0, max_value=2**80 - 1)) -def test_VersionStamp_init(v: int) -> None: - vs_int = VersionStamp(v) - assert int(vs_int) == v - assert VersionStamp(str(vs_int)) == vs_int - assert VersionStamp(bytes(vs_int)) == vs_int - assert bytes(vs_int) == vs_int - assert isinstance(vs_int, bytes) - - -@given(i=st.integers(min_value=0, max_value=2**64 - 1)) -def test_KvU64_init(i: int) -> None: - u64 = KvU64(i) - assert int(u64) == i - assert KvU64(bytes(u64)) == u64 - assert u64.to_bytes() == bytes(u64) - assert u64.to_bytes() == i.to_bytes(8, "little") - - -@given( - v1=st.integers(min_value=0, max_value=2**80 - 1), - v2=st.integers(min_value=0, max_value=2**80 - 1), -) -def 
test_VersionStamp_ordering(v1: int, v2: int) -> None: - vs1, vs2 = VersionStamp(v1), VersionStamp(v2) - if v1 < v2: - assert vs1 < vs2 - elif v1 > v2: - assert vs1 > vs2 - else: - assert vs1 == vs2 - - -def test_KVU64__bytes() -> None: - assert KvU64(bytes(KvU64(123456789))).value == 123456789 - assert KvU64(KvU64(123456789).to_bytes()).value == 123456789 - - def test_EndpointSelector__rejects_meta_without_strong_endpoint() -> None: meta_no_strong = mk_db_meta( [ From b68869c222aacfc7536670223cd9e25b6473a714 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Mon, 3 Feb 2025 05:53:17 +0000 Subject: [PATCH 09/52] feat: add types for Kv write API PlannedWrite is the central type, it represents the collection of mutation operations to be applied to the DB in a single atomic write. We have a KvWriter protocol which PlannedWrite implements, it'll be used by Kv so that we don't have a circular dependency between denokv.kv.Kv and denokv._kv_writes.PlannedWrite. In addition, we now have a ProtobufMessageRepresentation[MessageT] type which can be used to generalise the types representing datapath protocol types, like Sum, Max, Min, etc; to avoid coupling PlannedWrite to our default implementations. 
--- src/denokv/_kv_types.py | 32 ++ src/denokv/_kv_writes.py | 718 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 750 insertions(+) create mode 100644 src/denokv/_kv_types.py create mode 100644 src/denokv/_kv_writes.py diff --git a/src/denokv/_kv_types.py b/src/denokv/_kv_types.py new file mode 100644 index 0000000..2600e73 --- /dev/null +++ b/src/denokv/_kv_types.py @@ -0,0 +1,32 @@ +from abc import ABC +from abc import abstractmethod + +from google.protobuf.message import Message +from v8serialize import Encoder + +from denokv._datapath_pb2 import AtomicWrite +from denokv._pycompat.typing import Generic +from denokv._pycompat.typing import Protocol +from denokv._pycompat.typing import TypeVar + +WriteResultT = TypeVar("WriteResultT") +MessageT_co = TypeVar("MessageT_co", bound=Message, covariant=True) + + +class ProtobufMessageRepresentation(Generic[MessageT_co], ABC): + """An object that can represent itself as a protobuf Message.""" + + __slots__ = () + + @abstractmethod + def as_protobuf(self, *, v8_encoder: Encoder) -> MessageT_co: ... + + +class AtomicWriteRepresentation(ProtobufMessageRepresentation[AtomicWrite]): + __slots__ = () + + +class KvWriter(Protocol): + async def write( + self, atomic_write: AtomicWriteRepresentation, / + ) -> WriteResultT: ... 
diff --git a/src/denokv/_kv_writes.py b/src/denokv/_kv_writes.py new file mode 100644 index 0000000..8de85e2 --- /dev/null +++ b/src/denokv/_kv_writes.py @@ -0,0 +1,718 @@ +from __future__ import annotations + +from abc import ABC +from abc import abstractmethod +from dataclasses import dataclass +from dataclasses import field +from datetime import datetime +from enum import Enum +from itertools import islice +from types import MappingProxyType +from typing import Literal +from typing import overload + +import v8serialize +from v8serialize import Encoder +from v8serialize.constants import SerializationTag +from v8serialize.decode import ReadableTagStream + +from denokv import _datapath_pb2 as dp_protobuf +from denokv._datapath_pb2 import AtomicWrite +from denokv._kv_types import AtomicWriteRepresentation +from denokv._kv_types import KvWriter +from denokv._kv_values import KvEntry as KvEntry +from denokv._kv_values import KvU64 as KvU64 +from denokv._kv_values import VersionStamp as VersionStamp +from denokv._pycompat.dataclasses import FrozenAfterInitDataclass +from denokv._pycompat.dataclasses import slots_if310 +from denokv._pycompat.enum import EvalEnumRepr +from denokv._pycompat.protobuf import enum_name +from denokv._pycompat.typing import TYPE_CHECKING +from denokv._pycompat.typing import Container +from denokv._pycompat.typing import Mapping +from denokv._pycompat.typing import MutableSequence +from denokv._pycompat.typing import Protocol +from denokv._pycompat.typing import Self +from denokv._pycompat.typing import Sequence +from denokv._pycompat.typing import TypeAlias +from denokv._pycompat.typing import TypeIs +from denokv._pycompat.typing import Union +from denokv._pycompat.typing import cast +from denokv._pycompat.typing import runtime_checkable +from denokv.backoff import Backoff +from denokv.backoff import ExponentialBackoff +from denokv.datapath import AnyKvKey +from denokv.datapath import pack_key +from denokv.kv_keys import KvKey + + +def 
encode_kv_write_value(value: object, *, v8_encoder: Encoder) -> dp_protobuf.KvValue: + if isinstance(value, KvU64): + return dp_protobuf.KvValue( + data=bytes(value), + encoding=dp_protobuf.ValueEncoding.VE_LE64, + ) + elif isinstance(value, bytes): + return dp_protobuf.KvValue( + data=value, encoding=dp_protobuf.ValueEncoding.VE_BYTES + ) + else: + return dp_protobuf.KvValue( + data=bytes(v8_encoder.encode(value)), + encoding=dp_protobuf.ValueEncoding.VE_V8, + ) + + +@dataclass +class PlannedWrite(AtomicWriteRepresentation): + kv: KvWriter | None = field(default=None) + checks: MutableSequence[AnyKeyVersion] = field(default_factory=list) + mutations: MutableSequence[Mutation] = field(default_factory=list) + enqueues: MutableSequence[Enqueue] = field(default_factory=list) + + async def write(self, kv: KvWriter | None = None) -> CompletedWrite: + kv = self.kv if kv is None else kv + if kv is None: + raise TypeError("No kv was provided to write") + return await kv.write(self) + + def as_protobuf(self, *, v8_encoder: Encoder) -> AtomicWrite: + return AtomicWrite( + checks=[ + dp_protobuf.Check( + key=pack_key(check.key), versionstamp=check.versionstamp + ) + for check in self.checks + ], + mutations=[ + mut.as_protobuf(v8_encoder=v8_encoder) for mut in self.mutations + ], + enqueues=[enq.as_protobuf(v8_encoder=v8_encoder) for enq in self.enqueues], + ) + + @overload + def check(self, key: AnyKvKey, versionstamp: VersionStamp | None) -> Self: ... + + @overload + def check(self, check: AnyKeyVersion, /) -> Self: ... 
+ + def check( + self, key: AnyKvKey | AnyKeyVersion, versionstamp: VersionStamp | None = None + ) -> Self: + if isinstance(key, AnyKeyVersion): + self.checks.append(key) + if versionstamp is not None: + raise TypeError( + "versionstamp argument cannot be passed when first argument " + "is check object with a key and versionstamp" + ) + else: + self.checks.append(Check(key, versionstamp)) + return self + + def set(self, key: AnyKvKey, value: object, *, versioned: bool = False) -> Self: + return self.mutate(Set(key, value, versioned=versioned)) + + @overload + def sum(self, sum: Sum, /) -> Self: ... + + @overload + def sum(self, key: AnyKvKey, value: KvU64) -> Self: ... + + @overload + def sum( + self, + key: AnyKvKey, + value: int | float, + *, + limit_min: int | float | None = None, + limit_max: int | float | None = None, + limit_exceeded: LimitExceededInput | None = None, + limit: Limit | None = None, + ) -> Self: ... + + def sum( + self, + key: AnyKvKey | Sum, + value: int | float | KvU64 | None = None, + *, + limit_min: int | float | None = None, + limit_max: int | float | None = None, + limit_exceeded: LimitExceededInput | None = None, + limit: Limit | None = None, + ) -> Self: + if isinstance(key, Sum): + if value is not None: + raise TypeError("sum() takes no arguments after 'sum'") + return self.mutate(key) + + if value is None: + raise TypeError("sum() missing 1 required positional argument: 'value'") + if limit is None: + if not (limit_min is None and limit_max is None and limit_exceeded is None): + limit = Limit( + min=limit_min, max=limit_max, limit_exceeded=limit_exceeded + ) + else: + limit_min = limit.min if limit_min is None else limit_min + limit_max = limit.max if limit_max is None else limit_max + limit_exceeded = ( + cast(LimitExceededInput, limit.limit_exceeded) + if limit_exceeded is None + else limit_exceeded + ) + limit = Limit(limit_min, limit_max, limit_exceeded) + + return self.mutate(Sum(key, value, limit=limit)) + + def min(self, key: 
AnyKvKey, value: int | KvU64) -> Self: + return self.mutate(Min(key, value)) + + def max(self, key: AnyKvKey, value: int | KvU64) -> Self: + return self.mutate(Max(key, value)) + + def delete(self, key: AnyKvKey) -> Self: + return self.mutate(Delete(key)) + + def mutate(self, mutation: Mutation) -> Self: + self.mutations.append(mutation) + return self + + @overload + def enqueue(self, enqueue: Enqueue, /) -> Self: ... + @overload + def enqueue( + self, + message: object, + *, + delivery_time: datetime | None = None, + retry_delays: Backoff | None = None, + dead_letter_keys: Sequence[AnyKvKey] | None = None, + ) -> Self: ... + def enqueue( + self, + message: object | Enqueue, + *, + delivery_time: datetime | None = None, + retry_delays: Backoff | None = None, + dead_letter_keys: Sequence[AnyKvKey] | None = None, + ) -> Self: + if isinstance(message, Enqueue): + enqueue = message + else: + enqueue = Enqueue( + message, + delivery_time=delivery_time, + retry_delays=retry_delays, + dead_letter_keys=dead_letter_keys, + ) + self.enqueues.append(enqueue) + return self + + +@dataclass(init=False, **slots_if310()) +class ConflictedWrite(FrozenAfterInitDataclass): + ok: Literal[False] + conflicts: Mapping[AnyKvKey, Check] + versionstamp: None + checks: Sequence[Check] + mutations: Sequence[Mutation] + enqueues: Sequence[Enqueue] + + def __init__( + self, + failed_checks: Sequence[int], + checks: Sequence[Check], + mutations: Sequence[Mutation], + enqueues: Sequence[Enqueue], + ) -> None: + self.ok = False + try: + self.conflicts = MappingProxyType( + {checks[i].key: checks[i] for i in failed_checks} + ) + except IndexError as e: + raise ValueError("failed_checks contains out-of-bounds index") from e + self.versionstamp = None + self.checks = tuple(checks) + self.mutations = tuple(mutations) + self.enqueues = tuple(enqueues) + + +@dataclass(init=False, **slots_if310()) +class CommittedWrite(FrozenAfterInitDataclass): + ok: Literal[True] + conflicts: Mapping[KvKey, Check] # 
empty + versionstamp: VersionStamp + checks: Sequence[Check] + mutations: Sequence[Mutation] + enqueues: Sequence[Enqueue] + + def __init__( + self, + versionstamp: VersionStamp, + checks: Sequence[Check], + mutations: Sequence[Mutation], + enqueues: Sequence[Enqueue], + ) -> None: + self.ok = True + self.conflicts = MappingProxyType({}) + self.versionstamp = versionstamp + self.checks = tuple(checks) + self.mutations = tuple(mutations) + self.enqueues = tuple(enqueues) + + +CompletedWrite: TypeAlias = Union[CommittedWrite, ConflictedWrite] + + +def is_applied(write: CompletedWrite) -> TypeIs[CommittedWrite]: + return isinstance(write, CommittedWrite) + + +@runtime_checkable +class AnyKeyVersion(Protocol): + __slots__ = () + + if TYPE_CHECKING: + + @property + def key(self) -> AnyKvKey: ... + @property + def versionstamp(self) -> VersionStamp | None: ... + else: + key = ... + versionstamp = ... + + +@dataclass(frozen=True, **slots_if310()) +class Check(AnyKeyVersion): + key: AnyKvKey + versionstamp: VersionStamp | None + + +@dataclass(init=False, **slots_if310()) +class Mutation(FrozenAfterInitDataclass, ABC): + key: AnyKvKey + expire_at: datetime | None + + def __init__(self, key: AnyKvKey, expire_at: datetime | None) -> None: + if type(self) is Mutation: + raise TypeError("cannot create Mutation instances directly") + self.key = key + self.expire_at = expire_at + + @abstractmethod + def as_protobuf(self, *, v8_encoder: Encoder) -> dp_protobuf.Mutation: + pass + + def _expire_at_ms(self) -> int: + return 0 if self.expire_at is None else int(self.expire_at.timestamp() * 1000) + + +@dataclass(init=False, **slots_if310()) +class Set(Mutation): + value: object + versioned: bool + + def __init__( + self, + key: AnyKvKey, + value: object, + *, + expire_at: datetime | None = None, + versioned: bool = False, + ) -> None: + super(Set, self).__init__(key, expire_at=expire_at) + self.value = value + self.versioned = versioned + + def as_protobuf(self, *, v8_encoder: Encoder) 
-> dp_protobuf.Mutation: + return dp_protobuf.Mutation( + mutation_type=dp_protobuf.MutationType.M_SET_SUFFIX_VERSIONSTAMPED_KEY + if self.versioned + else dp_protobuf.MutationType.M_SET, + key=pack_key(self.key), + value=encode_kv_write_value(self.value, v8_encoder=v8_encoder), + expire_at_ms=self._expire_at_ms(), + ) + + +class LimitExceededPolicy(EvalEnumRepr, Enum): + ERROR = "error" + CLAMP = "clamp" + WRAP = "wrap" + + +LimitExceededInput = Literal[ + "error", + "clamp", + LimitExceededPolicy.ERROR, + LimitExceededPolicy.CLAMP, +] + + +@dataclass(init=False, frozen=True, **slots_if310()) +class Limit(Container["int | float"]): + """ + A range of numbers used to define the allowed range of Add operations. + + Examples + -------- + >>> lim = Limit(0, 100, limit_exceeded='clamp') + >>> lim + Limit(min=0, max=100, limit_exceeded=LimitExceededPolicy.CLAMP) + >>> -10 in lim + False + >>> 110 in lim + False + >>> 10 in lim + True + >>> 9000 in Limit(min=0) + True + """ + + min: int | float | None + max: int | float | None + limit_exceeded: LimitExceededPolicy + + def __init__( + self, + min: int | float | None = None, + max: int | float | None = None, + limit_exceeded: LimitExceededInput | None = LimitExceededPolicy.ERROR, + ) -> None: + object.__setattr__(self, "min", min) + object.__setattr__(self, "max", max) + object.__setattr__( + self, + "limit_exceeded", + LimitExceededPolicy(limit_exceeded or LimitExceededPolicy.ERROR), + ) + + def __contains__(self, x: object) -> bool: + if not isinstance(x, (int, float)): + return False + return (self.min is None or self.min <= x) and ( + self.max is None or self.max >= x + ) + + def as_protobuf( + self, + mutation: dp_protobuf.Mutation, + *, + v8_encoder: Encoder, + value_type: type[int | float], + ) -> dp_protobuf.Mutation: + if value_type not in (int, float): + raise TypeError(f"value_type must be int or float: {value_type!r}") + + if self.min is not None: + encoded_min = bytes(v8_encoder.encode(self.min)) + 
Limit._validate_encoded_type( + "min", + value=self.min, + v8_value=encoded_min, + required_encoding=value_type, + ) + mutation.sum_min = encoded_min + if self.max is not None: + encoded_max = bytes(v8_encoder.encode(self.max)) + Limit._validate_encoded_type( + "max", + value=self.max, + v8_value=encoded_max, + required_encoding=value_type, + ) + mutation.sum_max = encoded_max + if self.limit_exceeded is LimitExceededPolicy.CLAMP: + mutation.sum_clamp = True + return mutation + + @staticmethod + def _validate_encoded_type( + field: Literal["min", "max"], + value: int | float, + v8_value: bytes, + required_encoding: type[int | float], + ) -> None: + assert required_encoding in (int, float) + try: + value_type = _get_number_type(_get_v8_value_tag(v8_value)) + except ValueError as e: + raise RuntimeError( + f"Limit.{field} is not None so must encode to BigInt or " + f"Number using the configured v8_encoder, but it didn't: " + f"value={value}, v8_value={v8_value!r}, error={e}" + ) from e + if value_type is not required_encoding: + raise ValueError( + f"Limit.{field} encoded to {_js_type_name(value_type)} ({value_type}) " + f"but the parent Sum's value encoded as " + f"{_js_type_name(required_encoding)} ({required_encoding}). " + "Both must encode to the same JavaScript type. Use int or " + "float consistently for both. If a the V8 serializer is " + "customised, check how it's encoding int and float values." + ) + + +def _js_type_name(py_type: type[int | float]) -> Literal["BigInt", "Number"]: + return "BigInt" if py_type is int else "Number" + + +LIMIT_KVU64 = Limit( + min=0, + max=2**64 - 1, + # Not normally allowed by types because only LIMIT_KVU64 can use WRAP. 
+ limit_exceeded=cast(LimitExceededInput, LimitExceededPolicy.WRAP), +) +LIMIT_UNLIMITED = Limit() + + +@dataclass(init=False, **slots_if310()) +class Sum(Mutation): + value: int | float | KvU64 + limit: Limit = field(default=Limit()) + + def __init__( + self, + key: AnyKvKey, + value: int | float | KvU64, + *, + limit: Limit | None = None, + expire_at: datetime | None = None, + ) -> None: + super(Sum, self).__init__(key, expire_at=expire_at) + + # Only KvU64 supports wrapping on boundary (and this can't be changed). + if isinstance(value, KvU64): + if limit is not None and limit != LIMIT_KVU64: + raise ValueError( + "limit for KvU64 cannot be changed, it must be None or LIMIT_KVU64" + ) + limit = LIMIT_KVU64 + else: + if limit is None: + limit = LIMIT_UNLIMITED + elif limit.limit_exceeded == LimitExceededPolicy.WRAP: + raise ValueError( + "limit for JavaScript BigInt or Number cannot be WRAP, it " + "must be ERROR or CLAMP" + ) + assert limit is not None + + self.value = value + self.limit = limit + + def as_protobuf(self, *, v8_encoder: Encoder) -> dp_protobuf.Mutation: + mutation = dp_protobuf.Mutation( + mutation_type=dp_protobuf.MutationType.M_SUM, + key=pack_key(self.key), + value=encode_kv_write_value(self.value, v8_encoder=v8_encoder), + expire_at_ms=self._expire_at_ms(), + ) + + v8_number_type = _validate_number_mutation_value(self, mutation) + if v8_number_type is not None: + assert mutation.value.encoding == dp_protobuf.ValueEncoding.VE_V8 + # Only V8 values use the min/max limits. + self.limit.as_protobuf( + mutation, v8_encoder=v8_encoder, value_type=v8_number_type + ) + + return mutation + + +def _validate_number_mutation_value( + mut: Sum | Min | Max, mutation: dp_protobuf.Mutation +) -> type[int | float] | None: + """ + Validate the encoded numeric value of a Sum/Min/Max mutation operation. + + If the operation value is a V8-encoded number, the return value is the int + or float type, indicating if the encoded value is BigInt or Number. 
+ Otherwise the return value is None. + """ + if mutation.value.encoding == dp_protobuf.ValueEncoding.VE_LE64: + return None + elif mutation.value.encoding == dp_protobuf.ValueEncoding.VE_V8: + try: + value_type = _get_number_type(_get_v8_value_tag(mutation.value.data)) + except ValueError as e: + raise RuntimeError( + f"{type(mut).__name__}.value is not KvU64 so it must encode to " + f"BigInt or Number using the configured v8_encoder, but it didn't: " + f"value={mut.value!r}, v8_value={mutation.value.data!r}, error={e}" + ) from e + + return value_type + + raise ValueError( + f"{type(mut).__name__}.value is not a KvU64 or number that " + f"V8-serializes to BigInt or Number: value={mut.value!r}, ValueEncoding: " + f"{enum_name(dp_protobuf.ValueEncoding, mutation.value.encoding)}" + ) + + +def _get_v8_value_tag(v8_value: bytes) -> SerializationTag: + """Inspect a V8-serialized value to determine the type of value it holds.""" + try: + rts = ReadableTagStream(v8_value) + rts.read_header() + return rts.read_tag() + except v8serialize.V8SerializeError as e: + raise ValueError("v8_value bytes does not contain a V8-encoded value") from e + + +def _get_number_type(tag: SerializationTag) -> type[int | float]: + """Determine the JS number type of a V8-serialized value type tag.""" + if tag is SerializationTag.kBigInt or tag is SerializationTag.kBigIntObject: + return int + elif tag in { + SerializationTag.kNumberObject, + SerializationTag.kDouble, + SerializationTag.kInt32, + SerializationTag.kUint32, + }: + return float + raise ValueError(f"tag is not a BigInt or Number: {tag}") + + +@dataclass(**slots_if310()) +class Min(Mutation): + value: KvU64 + + def __init__( + self, + key: AnyKvKey, + value: int | KvU64, + *, + expire_at: datetime | None = None, + ) -> None: + super(Min, self).__init__(key, expire_at=expire_at) + self.value = value if isinstance(value, KvU64) else KvU64(value) + + def as_protobuf(self, *, v8_encoder: Encoder) -> dp_protobuf.Mutation: + mutation = 
dp_protobuf.Mutation( + mutation_type=dp_protobuf.MutationType.M_MIN, + key=pack_key(self.key), + value=encode_kv_write_value(self.value, v8_encoder=v8_encoder), + expire_at_ms=self._expire_at_ms(), + ) + _validate_number_mutation_value(self, mutation) + return mutation + + +@dataclass(**slots_if310()) +class Max(Mutation): + value: int | float | KvU64 + + def __init__( + self, + key: AnyKvKey, + value: int | KvU64, + *, + expire_at: datetime | None = None, + ) -> None: + super(Max, self).__init__(key, expire_at=expire_at) + self.value = value if isinstance(value, KvU64) else KvU64(value) + + def as_protobuf(self, *, v8_encoder: Encoder) -> dp_protobuf.Mutation: + mutation = dp_protobuf.Mutation( + mutation_type=dp_protobuf.MutationType.M_MAX, + key=pack_key(self.key), + value=encode_kv_write_value(self.value, v8_encoder=v8_encoder), + expire_at_ms=self._expire_at_ms(), + ) + _validate_number_mutation_value(self, mutation) + return mutation + + +@dataclass(**slots_if310()) +class Delete(Mutation): + def __init__(self, key: AnyKvKey) -> None: + super(Delete, self).__init__(key, expire_at=None) + + def as_protobuf(self, *, v8_encoder: Encoder | None = None) -> dp_protobuf.Mutation: + return dp_protobuf.Mutation( + mutation_type=dp_protobuf.MutationType.M_DELETE, key=pack_key(self.key) + ) + + +DEFAULT_ENQUEUE_RETRY_DELAYS = ExponentialBackoff( + initial_interval_seconds=1, multiplier=3 +) +DEFAULT_ENQUEUE_RETRY_DELAY_COUNT = 10 + + +@dataclass(init=False, **slots_if310()) +class Enqueue(FrozenAfterInitDataclass): + """ + A message to be async-delivered to a Deno app listening to the Kv's queue. + + Parameters + ---------- + message: + The message to deliver. Can be any value that can be written to the database. + delivery_time: + Delay the message delivery until this time. + + If the time is None or in the past, the message is delivered as soon as + possible. + retry_delays: + Delivery attempts that fail will be retried after these delays. 
+ + If the value is an Iterable, a fixed number of values will be drawn to retry + with. Use a fixed-length Sequence to specify a precise number of retries. + Default: DEFAULT_ENQUEUE_RETRY_DELAYS + dead_letter_keys: + Messages that cannot be delivered will be written to these keys. + + Notes + ----- + See [Deno.Kv.listenQueue()](https://docs.deno.com/api/deno/~/Deno.Kv#method_listenqueue_0) + """ + + message: object + delivery_time: datetime | None + retry_delays: Backoff + dead_letter_keys: Sequence[AnyKvKey] + + def __init__( + self, + message: object, + *, + delivery_time: datetime | None = None, + retry_delays: Backoff | None = None, + dead_letter_keys: Sequence[AnyKvKey] | None = None, + ): + self.message = message + self.delivery_time = delivery_time + self.retry_delays = ( + DEFAULT_ENQUEUE_RETRY_DELAYS if retry_delays is None else retry_delays + ) + self.dead_letter_keys = () if dead_letter_keys is None else dead_letter_keys + + def as_protobuf(self, *, v8_encoder: Encoder) -> dp_protobuf.Enqueue: + deadline_ms = None + if self.delivery_time is not None: + deadline_ms = int(self.delivery_time.timestamp() * 1000) + return dp_protobuf.Enqueue( + payload=bytes(v8_encoder.encode(self.message)), + keys_if_undelivered=[pack_key(k) for k in self.dead_letter_keys], + deadline_ms=deadline_ms, + backoff_schedule=self._evaluate_backoff_schedule(), + ) + + def _evaluate_backoff_schedule(self) -> Sequence[int]: + # Sample a fixed max number from unknown-length iterables. 
+ delay_seconds = ( + self.retry_delays + if isinstance(self.retry_delays, Sequence) + else islice(self.retry_delays, DEFAULT_ENQUEUE_RETRY_DELAY_COUNT) + ) + # Backoff times are in seconds, but we need milliseconds + return [int(delay * 1000) for delay in delay_seconds] + + +WriteOperation: TypeAlias = Union[Check, Set, Sum, Min, Max, Delete, Enqueue] From 7d77861c46976a8de030bfe342e8e04e542a6a0d Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Sun, 13 Oct 2024 05:41:40 +0000 Subject: [PATCH 10/52] feat: add create_default_v8_encoder() We need a slightly customised V8 serialization format encoder to handle int and float consistently in write sum/min/max operations. --- src/denokv/kv.py | 33 +++++++++++++++++++++++++++++++++ test/denokv_testing.py | 22 ++-------------------- 2 files changed, 35 insertions(+), 20 deletions(-) diff --git a/src/denokv/kv.py b/src/denokv/kv.py index 5cdcdce..fbb1ae9 100644 --- a/src/denokv/kv.py +++ b/src/denokv/kv.py @@ -15,6 +15,7 @@ from typing import overload import aiohttp +import v8serialize from fdb.tuple import unpack from v8serialize import Decoder from yarl import URL @@ -81,6 +82,38 @@ CursorFormatType: TypeAlias = Callable[["ListContext"], "AnyCursorFormat"] +def v8_encode_int_as_bigint( + value: object, + ctx: v8serialize.encode.EncodeContext, + next: v8serialize.encode.EncodeNextFn, +) -> None: + if isinstance(value, int): + ctx.stream.write_bigint(value) + else: + next(value) + + +# TODO: add an explicit tagged JSBigInt type to v8serialize +def create_default_v8_encoder() -> v8serialize.Encoder: + """ + Create a new V8-serialization format Encoder. + + This encoder always encodes int as JavaScript BigInt. We use this by default + for Kv instances to ensure consistent handling of int and float types. + + Notes + ----- + In contrast, the `v8serialize` default encoder encodes int as Number when it + fits in the +/- 2**53 - 1 range which float64 can represent exactly. 
This + results in differing number representation for different number sizes, which + is likely to be a footgun in the context of the Sum/Min/Max write + operations. + """ + return v8serialize.Encoder( + encode_steps=[v8_encode_int_as_bigint, *v8serialize.default_encode_steps] + ) + + class KvListOptions(TypedDict, total=False): """Keyword arguments of `Kv.list()`.""" diff --git a/test/denokv_testing.py b/test/denokv_testing.py index d474d4a..3e17764 100644 --- a/test/denokv_testing.py +++ b/test/denokv_testing.py @@ -54,6 +54,7 @@ from denokv.kv import AnyCursorFormat from denokv.kv import LimitExceededPolicy from denokv.kv import ListContext +from denokv.kv import create_default_v8_encoder from denokv.kv_keys import KvKey from denokv.result import Err from denokv.result import Ok @@ -66,26 +67,7 @@ E2 = TypeVar("E2") v8_decoder = v8serialize.Decoder() - - -def v8_encode_int_as_bigint( - value: object, - ctx: v8serialize.encode.EncodeContext, - next: v8serialize.encode.EncodeNextFn, -) -> None: - if isinstance(value, int): - ctx.stream.write_bigint(value) - else: - next(value) - - -# The default v8serialize encoder encodes int as number when it fits in the -# +/- 2**53 - 1 range which float64 can represent exactly. We want to encode int -# as bigint. -# TODO: add an explicit tagged JSBigInt type to v8serialize -v8_bigint_encoder = v8serialize.Encoder( - encode_steps=[v8_encode_int_as_bigint, *v8serialize.default_encode_steps] -) +v8_bigint_encoder = create_default_v8_encoder() def assume_ok(result: Result[T, E]) -> T: From fcc8fd7fe58012d3bf228d07efbe2952236d1231 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Sun, 2 Feb 2025 04:14:21 +0000 Subject: [PATCH 11/52] feat: implement Kv.write() API This allows making atomic_write datapath requests to make changes to the database, by translating PlannedWrite (& related types) into AtomicWrite protobuf messages. 
--- src/denokv/kv.py | 121 +++++++++++++++++-- test/denokv_testing.py | 2 +- test/test_kv.py | 266 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 376 insertions(+), 13 deletions(-) diff --git a/src/denokv/kv.py b/src/denokv/kv.py index fbb1ae9..9b237fb 100644 --- a/src/denokv/kv.py +++ b/src/denokv/kv.py @@ -9,6 +9,7 @@ from dataclasses import field from enum import Flag from enum import auto +from functools import partial from os import environ from types import TracebackType from typing import Literal @@ -18,19 +19,31 @@ import v8serialize from fdb.tuple import unpack from v8serialize import Decoder +from v8serialize import Encoder from yarl import URL +from denokv import _datapath_pb2 as dp_protobuf from denokv import datapath -from denokv._datapath_pb2 import ReadRange +from denokv._datapath_pb2 import AtomicWrite from denokv._datapath_pb2 import SnapshotRead from denokv._datapath_pb2 import SnapshotReadOutput from denokv._kv_values import KvEntry from denokv._kv_values import KvU64 from denokv._kv_values import VersionStamp +from denokv._kv_writes import Check +from denokv._kv_writes import CommittedWrite +from denokv._kv_writes import CompletedWrite +from denokv._kv_writes import ConflictedWrite +from denokv._kv_writes import Enqueue +from denokv._kv_writes import Mutation +from denokv._kv_writes import PlannedWrite +from denokv._kv_writes import WriteOperation from denokv._pycompat.dataclasses import slots_if310 +from denokv._pycompat.typing import Any from denokv._pycompat.typing import AsyncIterator from denokv._pycompat.typing import Awaitable from denokv._pycompat.typing import Callable +from denokv._pycompat.typing import Coroutine from denokv._pycompat.typing import Final from denokv._pycompat.typing import Generic from denokv._pycompat.typing import Iterable @@ -55,12 +68,13 @@ from denokv.datapath import AnyKvKey from denokv.datapath import AnyKvKeyT from denokv.datapath import AutoRetry +from denokv.datapath import CheckFailure +from 
denokv.datapath import DataPathDenoKvError from denokv.datapath import DataPathError from denokv.datapath import KvKeyEncodable from denokv.datapath import KvKeyPiece from denokv.datapath import KvKeyTuple from denokv.datapath import ProtocolViolation -from denokv.datapath import SnapshotReadResult from denokv.datapath import is_kv_key_tuple from denokv.datapath import pack_key from denokv.datapath import parse_protobuf_kv_entry @@ -76,6 +90,7 @@ # Note that the default arg doesn't seem to work with MyPy yet. The # DefaultKvKey alias is what this should behave as when defaulted. Pieces = TypeVarTuple("Pieces", default=Unpack[tuple[KvKeyPiece, ...]]) +_DataPathErrorT = TypeVar("_DataPathErrorT", bound=DataPathDenoKvError) SAFE_FLOAT_INT_RANGE: Final = range(-(2**53 - 1), 2**53) # 2**53 - 1 is max safe @@ -387,6 +402,7 @@ class Kv(AbstractAsyncContextManager["Kv", None]): session: aiohttp.ClientSession retry_delays: Backoff metadata_cache: DatabaseMetadataCache + v8_encoder: Encoder v8_decoder: Decoder flags: KvFlags @@ -395,12 +411,14 @@ def __init__( session: aiohttp.ClientSession, auth: AuthenticatorFn, retry: Backoff | None = None, + v8_encoder: Encoder | None = None, v8_decoder: Decoder | None = None, flags: KvFlags | None = None, ) -> None: self.session = session self.metadata_cache = DatabaseMetadataCache(authenticator=auth) self.retry_delays = ExponentialBackoff() if retry is None else retry + self.v8_encoder = v8_encoder or create_default_v8_encoder() self.v8_decoder = v8_decoder or Decoder() self.flags = KvFlags.IntAsNumber if flags is None else flags @@ -514,7 +532,7 @@ async def get( args = tuple(self._prepare_key(key) for key in args) ranges = [read_range_single(key) for key in args] snapshot_read_result = await self._snapshot_read( - ranges, consistency=consistency + dp_protobuf.SnapshotRead(ranges=ranges), consistency=consistency ) if isinstance(snapshot_read_result, Err): raise snapshot_read_result.error @@ -664,7 +682,7 @@ async def list( ) 
snapshot_read_result = await self._snapshot_read( - ranges=[read_range], consistency=consistency + dp_protobuf.SnapshotRead(ranges=[read_range]), consistency=consistency ) if isinstance(snapshot_read_result, Err): raise snapshot_read_result.error @@ -716,10 +734,25 @@ async def list( batch_start = parsed_key async def _snapshot_read( - self, ranges: Sequence[ReadRange], *, consistency: ConsistencyLevel + self, read: SnapshotRead, *, consistency: ConsistencyLevel ) -> _KvSnapshotReadResult: - read = SnapshotRead(ranges=ranges) - result: SnapshotReadResult + return await self._datapath_request( + partial(datapath.snapshot_read, read=read), consistency=consistency + ) + + async def _atomic_write(self, write: AtomicWrite) -> _KvAtomicWriteResult: + return await self._datapath_request( + partial(datapath.atomic_write, write=write), + consistency=ConsistencyLevel.STRONG, + ) + + async def _datapath_request( + self, + datapath_request: partial[Coroutine[Any, Any, Result[T, _DataPathErrorT]]], + *, + consistency: ConsistencyLevel, + ) -> Result[tuple[T, EndpointInfo], _DataPathErrorT]: + result: Result[T, _DataPathErrorT] endpoint: EndpointInfo for delay in attempts(self.retry_delays): # return error from this? 
@@ -738,11 +771,8 @@ async def _snapshot_read( endpoints = EndpointSelector(meta=cached_meta.value) endpoint = endpoints.get_endpoint(consistency) - result = await datapath.snapshot_read( - session=self.session, - meta=cached_meta.value, - endpoint=endpoint, - read=read, + result = await datapath_request( + session=self.session, meta=cached_meta.value, endpoint=endpoint ) if isinstance(result, Err): if result.error.auto_retry is AutoRetry.AFTER_BACKOFF: @@ -760,10 +790,77 @@ async def _snapshot_read( assert isinstance(result, Ok) return Ok((result.value, endpoint)) + def atomic(self, *operations: WriteOperation) -> PlannedWrite: + write = PlannedWrite(kv=self) + for op in operations: + if isinstance(op, Check): + write.check(op) + elif isinstance(op, Mutation): + write.mutate(op) + else: + assert isinstance(op, Enqueue) + write.enqueue(op) + return write + + @overload + async def write(self, *operations: WriteOperation) -> CompletedWrite: ... + + @overload + async def write(self, planned_write: PlannedWrite, /) -> CompletedWrite: ... + + async def write( + self, arg: PlannedWrite | WriteOperation | None = None, *args: WriteOperation + ) -> CompletedWrite: + if arg is None: + if args: + raise TypeError("arguments cannot be None") + # One None arg means 0 args were passed + planned_write = PlannedWrite() + elif isinstance(arg, PlannedWrite): + planned_write = arg + if args: + raise TypeError("unexpected arguments after PlannedWrite") + else: + planned_write = self.atomic(arg, *args) + + # Note that it's OK to submit a write with no operations. We get a + # versionstamp back. Submitting a write with only checks could be used + # to check if a key has been changed without reading the value. 
+ result = await self._atomic_write( + planned_write.as_protobuf(v8_encoder=self.v8_encoder) + ) + + concrete_checks = [ + Check(key=key_ver.key, versionstamp=key_ver.versionstamp) + for key_ver in planned_write.checks + ] + + if isinstance(result, Err): + if isinstance(result.error, CheckFailure): + check_failure = result.error + return ConflictedWrite( + failed_checks=list(check_failure.failed_check_indexes), + checks=concrete_checks, + mutations=planned_write.mutations, + enqueues=planned_write.enqueues, + ) + raise result.error + + raw_versionstamp, endpoint = result.value + return CommittedWrite( + versionstamp=VersionStamp(raw_versionstamp), + checks=concrete_checks, + mutations=planned_write.mutations, + enqueues=planned_write.enqueues, + ) + _KvSnapshotReadResult: TypeAlias = Result[ tuple[SnapshotReadOutput, EndpointInfo], DataPathError ] +_KvAtomicWriteResult: TypeAlias = Result[ + tuple[bytes, EndpointInfo], CheckFailure | DataPathError +] @dataclass(frozen=True) diff --git a/test/denokv_testing.py b/test/denokv_testing.py index 3e17764..f0d5f1e 100644 --- a/test/denokv_testing.py +++ b/test/denokv_testing.py @@ -32,6 +32,7 @@ from denokv._kv_values import KvEntry from denokv._kv_values import KvU64 from denokv._kv_values import VersionStamp +from denokv._kv_writes import LimitExceededPolicy from denokv._pycompat.dataclasses import slots_if310 from denokv._pycompat.protobuf import enum_name from denokv._pycompat.typing import Any @@ -52,7 +53,6 @@ from denokv.datapath import parse_protobuf_kv_entry from denokv.errors import InvalidCursor from denokv.kv import AnyCursorFormat -from denokv.kv import LimitExceededPolicy from denokv.kv import ListContext from denokv.kv import create_default_v8_encoder from denokv.kv_keys import KvKey diff --git a/test/test_kv.py b/test/test_kv.py index 7f94d11..9a01877 100644 --- a/test/test_kv.py +++ b/test/test_kv.py @@ -1,6 +1,7 @@ from __future__ import annotations import asyncio +import re import sys import weakref from 
contextlib import asynccontextmanager @@ -24,6 +25,7 @@ from hypothesis import settings from hypothesis import strategies as st from v8serialize import Decoder +from v8serialize.jstypes import JSMap from yarl import URL from denokv import datapath @@ -34,7 +36,11 @@ from denokv._datapath_pb2 import SnapshotReadStatus from denokv._datapath_pb2 import ValueEncoding from denokv._kv_values import KvEntry +from denokv._kv_values import KvU64 from denokv._kv_values import VersionStamp +from denokv._kv_writes import DEFAULT_ENQUEUE_RETRY_DELAY_COUNT +from denokv._kv_writes import Limit +from denokv._kv_writes import LimitExceededPolicy from denokv._pycompat.enum import StrEnum from denokv._pycompat.typing import Any from denokv._pycompat.typing import AsyncGenerator @@ -76,6 +82,8 @@ from denokv.result import Err from denokv.result import Ok from denokv.result import Result +from denokv.result import is_err +from denokv.result import is_ok from test.advance_time import advance_time from test.denokv_testing import ExampleCursorFormat from test.denokv_testing import MockKvDb @@ -939,6 +947,264 @@ async def test_Kv_list__retries_retryable_snapshot_read_errors( assert len(auth_fn.mock_calls) == 7 +@pytest_mark_asyncio +async def test_Kv_write__set(kv: Kv) -> None: + _, before = await kv.get(("foo", 1)) + result = await kv.atomic().set(("foo", 1), "Hi").write() + _, after = await kv.get(("foo", 1)) + + assert before is None + assert is_ok(result) + assert after and after.value == "Hi" + + +@pytest_mark_asyncio +async def test_Kv_write__set_versioned(kv: Kv) -> None: + result = await kv.atomic().set(("foo", 1), "Hi", versioned=True).write() + assert is_ok(result) + _, entry = await kv.get(("foo", 1, str(result.versionstamp))) + assert entry and entry.value == "Hi" + + +ErrorPredicate: TypeAlias = Callable[[Exception], bool] + + +def match_client_error(server_msg_content: str) -> Callable[[Exception], bool]: + def is_client_error(e: Exception) -> bool: + return ( + isinstance(e, 
ResponseUnsuccessful) + and e.status == 400 + and server_msg_content in e.body_text + ) + + return is_client_error + + +def match_error( + kind: type[BaseException], + containing: str | None = None, + matching: str | re.Pattern[str] | None = None, +) -> ErrorPredicate: + if containing is not None: + if matching is not None: + raise ValueError("containing and matching args cannot both be set") + matching = re.escape(containing) + elif matching is None: + raise ValueError("containing or matching args must be set") + + def is_error(e: Exception) -> bool: + return isinstance(e, kind) and bool(re.search(matching, str(e))) + + return is_error + + +@asynccontextmanager +async def validate_write_outcome( + kv: Kv, + initial_val: object | None, + result: object | None | ErrorPredicate, +) -> AsyncGenerator[tuple[Kv, KvKeyTuple]]: + match_error: ErrorPredicate | None = result if callable(result) else None + + if initial_val is not None: + assert is_ok(await kv.atomic().set(("foo", 0), initial_val).write()) + try: + yield (kv, ("foo", 0)) + assert not match_error, "write succeeded but is expected to fail" + except AssertionError: + raise + except Exception as e: + if not match_error: + raise + assert match_error(e), f"Did not match error: {e!r}" + return + + (_, a) = await kv.get(("foo", 0)) + if result is None: + assert a is None + else: + assert a and a.value == result and type(a.value) is type(result) + + +# fmt: off +_params_test_Kv_write__sum = pytest.mark.parametrize( + "initial_val, sum_val, sum_kwargs, result", + [ + (12, 3, {}, 15), + (12, -3, {}, 9), + (None, 3, {}, 3), + (12.5, 2.5, {}, 15.0), + (12.5, -2.5, {}, 10.0), + (None, 2.5, {}, 2.5), + (None, -2.5, {}, -2.5), + (KvU64(12), KvU64(3), {}, KvU64(15)), + (KvU64(12), 3, {}, KvU64(15)), + (KvU64(12), -3, {}, KvU64(9)), + (None, KvU64(3), {}, KvU64(3)), + # KvU64 wraps on overflow + (KvU64(1), -3, {}, KvU64(2**64 - 2)), + (KvU64(2**64 - 2), 3, {}, KvU64(1)), + # Limits + (12, 10, dict(limit_min=10, limit_max=20, 
limit_exceeded="clamp"), 20), + (12, -10, dict(limit_min=10, limit_max=20, limit_exceeded="clamp"), 10), + (12.0, 10.0, dict(limit_min=10.0, limit_max=20.0, limit_exceeded="clamp"), 20.0), # noqa: E501 + (12.0, -10.0, dict(limit_min=10.0, limit_max=20.0, limit_exceeded="clamp"), 10.0), # noqa: E501 + # limit via Limit object + (12, -10, dict(limit=Limit(10, 20, 'clamp')), 10), + # kwargs override the limit object fields + (12, 10, dict(limit=Limit(9, 21, 'error'), limit_min=10, limit_max=20, limit_exceeded='clamp'), 20), # noqa: E501 + (12, -10, dict(limit=Limit(9, 21, 'error'), limit_min=10, limit_max=20, limit_exceeded='clamp'), 10), # noqa: E501 + # overflow with limit_exceeded error causes write to fail with client error + pytest.param(12, 10, dict(limit_min=10, limit_max=20, limit_exceeded="error"), match_client_error("Mutation is not a valid M_SUM operation"), id='err-limit-high-BigInt'), # noqa: E501 + pytest.param(12, -10, dict(limit_min=10, limit_max=20, limit_exceeded="error"), match_client_error("Mutation is not a valid M_SUM operation"), id='err-limit-low-BigInt'), # noqa: E501 + pytest.param(12.0, 10.0, dict(limit_min=10.0, limit_max=20.0, limit_exceeded="error"), match_client_error("Mutation is not a valid M_SUM operation"), id='err-limit-high-Number'), # noqa: E501 + pytest.param(12.0, -10.0, dict(limit_min=10.0, limit_max=20.0, limit_exceeded="error"), match_client_error("Mutation is not a valid M_SUM operation"), id='err-limit-low-Number'), # noqa: E501 + # Cannot use limit_exceeded other than wrap for KvU64 + pytest.param(KvU64(12), KvU64(1), dict(limit_exceeded="error"), lambda e: isinstance(e, ValueError) and "limit for KvU64 cannot be changed, it must be None or LIMIT_KVU64" == str(e), id='err-invalid-exceeded-KvU64'), # noqa: E501 + # Cannot use limit_exceeded wrap for BigInt/Number + pytest.param(1, 1, dict(limit_exceeded=LimitExceededPolicy.WRAP), match_error(ValueError, "limit for JavaScript BigInt or Number cannot be WRAP, it must be ERROR 
or CLAMP"), id='err-invalid-exceeded-BigInt'), # noqa: E501 + pytest.param(1.0, 1.0, dict(limit_exceeded=LimitExceededPolicy.WRAP), match_error(ValueError, "limit for JavaScript BigInt or Number cannot be WRAP, it must be ERROR or CLAMP"), id='err-invalid-exceeded-Number'), # noqa: E501 + ], +) +# fmt: on +@_params_test_Kv_write__sum +@pytest_mark_asyncio +async def test_Kv_write__sum( + kv: Kv, + initial_val: int | float | KvU64 | None, + sum_val: int | float | KvU64, + sum_kwargs: dict[str, Any], + result: int | float | KvU64 | Callable[[Exception], bool], +) -> None: + async with validate_write_outcome(kv, initial_val, result) as (kv, key): + assert is_ok(await kv.atomic().sum(key, sum_val, **sum_kwargs).write()) + + +@pytest.mark.parametrize( + "initial_val, max_val, max_kwargs, result", + [ + (KvU64(12), KvU64(3), {}, KvU64(12)), + (KvU64(3), KvU64(12), {}, KvU64(12)), + # Cannot use max() on non KvU64 stored value + (3, KvU64(12), {}, match_client_error("SnapshotWrite is not valid")), + ( + KvU64(1), + 2.0, + {}, + match_error(TypeError, "value must be 8 bytes or a 64-bit unsigned int"), + ), + ], +) +@pytest_mark_asyncio +async def test_Kv_write__max( + kv: Kv, + initial_val: int | float | KvU64 | None, + max_val: KvU64, + max_kwargs: dict[str, Any], + result: int | float | KvU64 | Callable[[Exception], bool], +) -> None: + async with validate_write_outcome(kv, initial_val, result): + assert is_ok(await kv.atomic().max(("foo", 0), max_val, **max_kwargs).write()) + + +@pytest.mark.parametrize( + "initial_val, min_val, min_kwargs, result", + [ + (KvU64(12), KvU64(3), {}, KvU64(3)), + (KvU64(3), KvU64(12), {}, KvU64(3)), + # Cannot use min() on non KvU64 stored value + (3, KvU64(12), {}, match_client_error("SnapshotWrite is not valid")), + ( + KvU64(1), + 2.0, + {}, + match_error(TypeError, "value must be 8 bytes or a 64-bit unsigned int"), + ), + ], +) +@pytest_mark_asyncio +async def test_Kv_write__min( + kv: Kv, + initial_val: int | float | KvU64 | None, + 
min_val: KvU64, + min_kwargs: dict[str, Any], + result: int | float | KvU64 | Callable[[Exception], bool], +) -> None: + async with validate_write_outcome(kv, initial_val, result): + assert is_ok(await kv.atomic().min(("foo", 0), min_val, **min_kwargs).write()) + + +@pytest.mark.parametrize("initial_val", [None, 42]) +@pytest_mark_asyncio +async def test_Kv_write__delete( + kv: Kv, initial_val: int | float | KvU64 | None +) -> None: + async with validate_write_outcome(kv, initial_val, result=None): + assert is_ok(await kv.atomic().delete(("foo", 0)).write()) + + +@pytest_mark_asyncio +async def test_Kv_write__check__allows_write_when_matching(kv: Kv) -> None: + async with validate_write_outcome(kv, None, result=42) as (kv, key): + assert is_ok(await kv.atomic().check(key, None).set(key, 42).write()) + + async with validate_write_outcome(kv, 41, result=42) as (kv, key): + _, initial = await kv.get(key) + assert initial + assert is_ok( + await kv.atomic().check(key, initial.versionstamp).set(key, 42).write() + ) + + +@pytest_mark_asyncio +async def test_Kv_write__check__fails_write_when_mismatching(kv: Kv) -> None: + async with validate_write_outcome(kv, None, result=None) as (kv, key): + result = await kv.atomic().check(key, VersionStamp(1)).set(key, 42).write() + assert is_err(result) + assert result.conflicts[key].versionstamp == VersionStamp(1) + + async with validate_write_outcome(kv, 41, result=42) as (kv, key): + _, initial = await kv.get(key) + assert initial + assert is_ok( + await kv.atomic().check(key, initial.versionstamp).set(key, 42).write() + ) + # Try to change from original version + result = await kv.atomic().check(key, initial.versionstamp).set(key, 80).write() + assert is_err(result) + assert result.conflicts[key].versionstamp == initial.versionstamp + + +@pytest_mark_asyncio +async def test_Kv_write__enqueue(kv: Kv, mock_db: MockKvDb) -> None: + assert len(mock_db.queued_messages) == 0 + + t = datetime.now() + timedelta(seconds=60) + await ( + 
kv.atomic() + .enqueue( + {"foo": "bar"}, + delivery_time=t, + retry_delays=[1, 2], + dead_letter_keys=[("foo", 1), ("bar", 2)], + ) + .enqueue({"baz": "boz"}) + .write() + ) + + assert len(mock_db.queued_messages) == 2 + a, b = mock_db.queued_messages + assert a.payload == JSMap(foo="bar") + assert a.deadline_ms == pytest.approx(t.timestamp() * 1000, rel=1) + assert a.backoff_schedule == [1000, 2000] # milliseconds + assert a.keys_if_undelivered == [KvKey("foo", 1), KvKey("bar", 2)] + + assert b.payload == JSMap(baz="boz") + assert b.deadline_ms == 0 + assert len(b.backoff_schedule) == DEFAULT_ENQUEUE_RETRY_DELAY_COUNT + assert b.keys_if_undelivered == [] + + @pytest_mark_asyncio async def test_aclose() -> None: authenticator = Mock() From f7945b182507982c1c0349f6dc2c6c82fd711fed Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Sun, 13 Oct 2024 08:15:01 +0000 Subject: [PATCH 12/52] refactor: move mock Data Path API into denokv_testing We need to use it in both test_kv and test_datapath. 
--- test/denokv_testing.py | 113 +++++++++++++++++++++++++++++++++++++++++ test/test_datapath.py | 97 ++--------------------------------- 2 files changed, 116 insertions(+), 94 deletions(-) diff --git a/test/denokv_testing.py b/test/denokv_testing.py index f0d5f1e..39cba31 100644 --- a/test/denokv_testing.py +++ b/test/denokv_testing.py @@ -1,6 +1,7 @@ from __future__ import annotations import math +import re import sys from base64 import b16decode from base64 import b16encode @@ -10,13 +11,16 @@ from datetime import datetime from datetime import timedelta from itertools import groupby +from typing import Literal from typing import overload from uuid import UUID import v8serialize import v8serialize.encode +from aiohttp import web from fdb.tuple import pack from fdb.tuple import unpack +from google.protobuf.message import Message from denokv._datapath_pb2 import AtomicWrite from denokv._datapath_pb2 import AtomicWriteOutput @@ -28,6 +32,9 @@ from denokv._datapath_pb2 import MutationType from denokv._datapath_pb2 import ReadRange from denokv._datapath_pb2 import ReadRangeOutput +from denokv._datapath_pb2 import SnapshotRead +from denokv._datapath_pb2 import SnapshotReadOutput +from denokv._datapath_pb2 import SnapshotReadStatus from denokv._datapath_pb2 import ValueEncoding from denokv._kv_values import KvEntry from denokv._kv_values import KvU64 @@ -36,13 +43,16 @@ from denokv._pycompat.dataclasses import slots_if310 from denokv._pycompat.protobuf import enum_name from denokv._pycompat.typing import Any +from denokv._pycompat.typing import Callable from denokv._pycompat.typing import ClassVar +from denokv._pycompat.typing import Final from denokv._pycompat.typing import Iterable from denokv._pycompat.typing import Mapping from denokv._pycompat.typing import NamedTuple from denokv._pycompat.typing import Sequence from denokv._pycompat.typing import TypeIs from denokv._pycompat.typing import TypeVar +from denokv._pycompat.typing import cast from denokv.auth import 
DatabaseMetadata from denokv.auth import EndpointInfo from denokv.datapath import AnyKvKey @@ -65,6 +75,7 @@ T = TypeVar("T") E = TypeVar("E") E2 = TypeVar("E2") +MessageT = TypeVar("MessageT", bound=Message) v8_decoder = v8serialize.Decoder() v8_bigint_encoder = create_default_v8_encoder() @@ -569,6 +580,108 @@ def decode_enqueue_message(enqueue: Enqueue) -> MockKvDbMessage: ) +def mock_db_api(mock_db: MockKvDb) -> web.Application: + """HTTP endpoints implementing the KV Data Path protocol against MockKvDb.""" + + def get_server_version(request: web.Request) -> Literal[1, 2, 3]: + match = re.match(r"^/v([123])/", request.path) + version: Final = int(match.group(1)) if match else -1 + if version not in (1, 2, 3): + raise AssertionError("handler is not registered at /v[123]/ URL path") + return cast(Literal[1, 2, 3], version) + + def validate_request(request: web.Request) -> None: + server_version = get_server_version(request) + + if request.method != "POST": + raise web.HTTPBadRequest(body="method must be POST") + if request.content_type != "application/x-protobuf": + raise web.HTTPBadRequest(body="content-type must be application/x-protobuf") + + db_id_header = ( + "x-transaction-domain-id" if server_version == 1 else "x-denokv-database-id" + ) + try: + UUID(request.headers.get(db_id_header, "")) + except Exception: + raise web.HTTPBadRequest( + body=f"client did not set a valid {db_id_header} when talking to a " + f"v{server_version} server" + ) from None + + if server_version > 2: + try: + client_version = int(request.headers.get("x-denokv-version", "")) + if client_version not in (2, 3): + raise ValueError(f"invalid client_version: {client_version}") + except Exception: + raise web.HTTPBadRequest( + body=f"client did not set a valid x-denokv-version header when " + f"talking to a v{server_version} server" + ) from None + + def parse_protobuf_body( + body_bytes: bytes, message_type: type[MessageT] + ) -> MessageT: + message = message_type() + try: + count = 
message.ParseFromString(body_bytes) + if len(body_bytes) != count: + raise ValueError( + f"{len(body_bytes) - count} trailing bytes after " + f"{message_type.__name__}" + ) + except Exception as e: + raise web.HTTPBadRequest( + body=f"body is not a valid {message_type.__name__} message: {e}" + ) from e + return message + + # Valid snapshot_read handler + async def strong_snapshot_read(request: web.Request) -> web.Response: + validate_request(request) + read = parse_protobuf_body(await request.read(), SnapshotRead) + + read_result = SnapshotReadOutput( + status=SnapshotReadStatus.SR_SUCCESS, + read_is_strongly_consistent=True, + ranges=[mock_db.snapshot_read_range(r) for r in read.ranges], + ) + return web.Response( + status=200, + content_type="application/x-protobuf", + body=read_result.SerializeToString(), + ) + + # Valid atomic_write handler + async def atomic_write(request: web.Request) -> web.Response: + validate_request(request) + + write = parse_protobuf_body(await request.read(), AtomicWrite) + + try: + write_result = mock_db.atomic_write(write) + except ValueError as e: + raise web.HTTPBadRequest(body=f"SnapshotWrite is not valid: {e}") from e + + return web.Response( + status=200, + content_type="application/x-protobuf", + body=write_result.SerializeToString(), + ) + + app = web.Application() + + # Working endpoints + app.router.add_post("/v1/consistency/strong/snapshot_read", strong_snapshot_read) + app.router.add_post("/v2/consistency/strong/snapshot_read", strong_snapshot_read) + app.router.add_post("/v3/consistency/strong/snapshot_read", strong_snapshot_read) + app.router.add_post("/v1/consistency/strong/atomic_write", atomic_write) + app.router.add_post("/v2/consistency/strong/atomic_write", atomic_write) + app.router.add_post("/v3/consistency/strong/atomic_write", atomic_write) + return app + + def add_entries( db: MockKvDb, entries: Mapping[KvKeyTuple, object] | Iterable[tuple[KvKeyTuple, object]], diff --git a/test/test_datapath.py 
b/test/test_datapath.py index 139bc68..9b76dc6 100644 --- a/test/test_datapath.py +++ b/test/test_datapath.py @@ -80,6 +80,7 @@ from denokv.result import is_ok from test.denokv_testing import MockKvDb from test.denokv_testing import add_entries +from test.denokv_testing import mock_db_api from test.denokv_testing import nextafter from test.denokv_testing import unsafe_parse_protobuf_kv_entry from test.denokv_testing import v8_bigint_encoder @@ -113,92 +114,7 @@ def example_entries() -> Mapping[KvKeyTuple, object]: @pytest.fixture def db_api(mock_db: MockKvDb) -> web.Application: - def get_server_version(request: web.Request) -> Literal[1, 2, 3]: - match = re.match(r"^/v([123])/", request.path) - version: Final = int(match.group(1)) if match else -1 - if version not in (1, 2, 3): - raise AssertionError("handler is not registered at /v[123]/ URL path") - return cast(Literal[1, 2, 3], version) - - def validate_request(request: web.Request) -> None: - server_version = get_server_version(request) - - if request.method != "POST": - raise web.HTTPBadRequest(body="method must be POST") - if request.content_type != "application/x-protobuf": - raise web.HTTPBadRequest(body="content-type must be application/x-protobuf") - - db_id_header = ( - "x-transaction-domain-id" if server_version == 1 else "x-denokv-database-id" - ) - try: - UUID(request.headers.get(db_id_header, "")) - except Exception: - raise web.HTTPBadRequest( - body=f"client did not set a valid {db_id_header} when talking to a " - f"v{server_version} server" - ) from None - - if server_version > 2: - try: - client_version = int(request.headers.get("x-denokv-version", "")) - if client_version not in (2, 3): - raise ValueError(f"invalid client_version: {client_version}") - except Exception: - raise web.HTTPBadRequest( - body=f"client did not set a valid x-denokv-version header when " - f"talking to a v{server_version} server" - ) from None - - def parse_protobuf_body( - body_bytes: bytes, message_type: 
type[MessageT] - ) -> MessageT: - message = message_type() - try: - count = message.ParseFromString(body_bytes) - if len(body_bytes) != count: - raise ValueError( - f"{len(body_bytes) - count} trailing bytes after " - f"{message_type.__name__}" - ) - except Exception as e: - raise web.HTTPBadRequest( - body=f"body is not a valid {message_type.__name__} message: {e}" - ) from e - return message - - # Valid snapshot_read handler - async def strong_snapshot_read(request: web.Request) -> web.Response: - validate_request(request) - read = parse_protobuf_body(await request.read(), SnapshotRead) - - read_result = SnapshotReadOutput( - status=SnapshotReadStatus.SR_SUCCESS, - read_is_strongly_consistent=True, - ranges=[mock_db.snapshot_read_range(r) for r in read.ranges], - ) - return web.Response( - status=200, - content_type="application/x-protobuf", - body=read_result.SerializeToString(), - ) - - # Valid atomic_write handler - async def atomic_write(request: web.Request) -> web.Response: - validate_request(request) - - write = parse_protobuf_body(await request.read(), AtomicWrite) - - try: - write_result = mock_db.atomic_write(write) - except ValueError as e: - raise web.HTTPBadRequest(body=f"SnapshotWrite is not valid: {e}") from e - - return web.Response( - status=200, - content_type="application/x-protobuf", - body=write_result.SerializeToString(), - ) + """HTTP endpoints backed by a MockKvDb, plus various misbehaving endpoints.""" # Generic Data Path errors async def violation_2xx_text_body(request: web.Request) -> web.Response: @@ -398,7 +314,7 @@ def add_datapath_post(app: web.Application, path: str, handler: Handler) -> None for req_kind in _DataPathRequestKind: app.router.add_post(f"{path}/{req_kind.value}", handler) - app = web.Application() + app = mock_db_api(mock_db) # Generic error endpoints add_datapath_post(app, "/violation_2xx_text_body", violation_2xx_text_body) @@ -459,13 +375,6 @@ def add_datapath_post(app: web.Application, path: str, handler: Handler) 
-> None "/invalid_status/atomic_write", violation_atomic_write_invalid_status ) - # Working endpoints - app.router.add_post("/v1/consistency/strong/snapshot_read", strong_snapshot_read) - app.router.add_post("/v2/consistency/strong/snapshot_read", strong_snapshot_read) - app.router.add_post("/v3/consistency/strong/snapshot_read", strong_snapshot_read) - app.router.add_post("/v1/consistency/strong/atomic_write", atomic_write) - app.router.add_post("/v2/consistency/strong/atomic_write", atomic_write) - app.router.add_post("/v3/consistency/strong/atomic_write", atomic_write) return app From cd9b0d705831de4a6cd9a4b2410497c60edcdd72 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Sun, 13 Oct 2024 08:17:09 +0000 Subject: [PATCH 13/52] refactor: use denokv_testing's mock HTTP API in test_kv --- test/test_kv.py | 113 ++++++++++++++++++++++++++++++++---------------- 1 file changed, 75 insertions(+), 38 deletions(-) diff --git a/test/test_kv.py b/test/test_kv.py index 9a01877..08af41d 100644 --- a/test/test_kv.py +++ b/test/test_kv.py @@ -10,6 +10,7 @@ from datetime import timedelta from functools import partial from itertools import repeat +from typing import Literal from unittest.mock import AsyncMock from unittest.mock import Mock from unittest.mock import patch @@ -19,6 +20,8 @@ import pytest import pytest_asyncio import v8serialize +from aiohttp import web +from aiohttp.test_utils import TestClient as _TestClient from fdb.tuple import unpack from hypothesis import HealthCheck from hypothesis import given @@ -44,6 +47,7 @@ from denokv._pycompat.enum import StrEnum from denokv._pycompat.typing import Any from denokv._pycompat.typing import AsyncGenerator +from denokv._pycompat.typing import Awaitable from denokv._pycompat.typing import Callable from denokv._pycompat.typing import Generator from denokv._pycompat.typing import Mapping @@ -90,8 +94,11 @@ from test.denokv_testing import add_entries from test.denokv_testing import assume_ok from test.denokv_testing import 
mk_db_meta +from test.denokv_testing import mock_db_api from test.denokv_testing import unsafe_parse_protobuf_kv_entry +TestClient: TypeAlias = _TestClient[web.Request, web.Application] + pytest_mark_asyncio = pytest.mark.asyncio() @@ -310,22 +317,39 @@ def mock_snapshot_read() -> Generator[Mock]: yield mock +@pytest.fixture +def mock_atomic_write() -> Generator[Mock]: + mock = AsyncMock(side_effect=NotImplementedError) + with patch("denokv.datapath.atomic_write", mock) as mock: + yield mock + + @pytest.fixture def retry_delays() -> Backoff: return () -@pytest_asyncio.fixture -async def client_session() -> AsyncGenerator[aiohttp.ClientSession]: - async with aiohttp.ClientSession() as cs: - yield cs +@pytest.fixture +def client_session(client: TestClient) -> aiohttp.ClientSession: + return client.session + + +@pytest.fixture(params=[1, 2, 3]) +def datapath_version(request: pytest.FixtureRequest) -> Literal[1, 2, 3]: + assert request.param in (1, 2, 3) + return cast(Literal[1, 2, 3], request.param) @pytest.fixture -def meta() -> DatabaseMetadata: - return mk_db_meta( - [EndpointInfo(URL("https://example.com/"), ConsistencyLevel.STRONG)] - ) +def datapath_endpoint_url( + client: TestClient, datapath_version: Literal[1, 2, 3] +) -> URL: + return client.make_url(f"/v{datapath_version}/consistency/strong/") + + +@pytest.fixture +def meta(datapath_endpoint_url: URL) -> DatabaseMetadata: + return mk_db_meta([EndpointInfo(datapath_endpoint_url, ConsistencyLevel.STRONG)]) @pytest.fixture @@ -347,7 +371,25 @@ def kv_flags() -> KvFlags: @pytest.fixture -def create_db( +def mock_db() -> MockKvDb: + return MockKvDb() + + +@pytest.fixture +def db_api(mock_db: MockKvDb) -> web.Application: + return mock_db_api(mock_db) + + +@pytest_asyncio.fixture +async def client( + db_api: web.Application, + aiohttp_client: Callable[[web.Application], Awaitable[TestClient]], +) -> TestClient: + return await aiohttp_client(db_api) + + +@pytest.fixture +def create_kv( client_session: 
aiohttp.ClientSession, auth_fn: AuthenticatorFn, retry_delays: Backoff, @@ -365,8 +407,8 @@ def create_db( @pytest.fixture -def db(create_db: partial[Kv]) -> Kv: - return create_db() +def kv(create_kv: partial[Kv]) -> Kv: + return create_kv() @pytest.fixture @@ -399,19 +441,19 @@ def pack_kv_entry( @pytest_mark_asyncio async def test_Kv_get__rejects_invalid_arguments( - db: Kv, mock_snapshot_read: AsyncMock + kv: Kv, mock_snapshot_read: AsyncMock ) -> None: with pytest.raises( TypeError, match=r"cannot use positional keys and keys keyword argument" ): - await db.get(("a", 1), keys=[("a", 2)]) # type: ignore[call-overload] + await kv.get(("a", 1), keys=[("a", 2)]) # type: ignore[call-overload] with pytest.raises(TypeError, match=r"at least one key argument must be passed"): - await db.get() # type: ignore[call-overload] + await kv.get() # type: ignore[call-overload] @pytest_mark_asyncio async def test_Kv_get__returns_single_value_for_single_key( - db: Kv, mock_snapshot_read: AsyncMock + kv: Kv, mock_snapshot_read: AsyncMock ) -> None: read_output = SnapshotReadOutput( ranges=[ReadRangeOutput(values=[pack_kv_entry(("a", 1), b"x")])], @@ -423,7 +465,7 @@ async def test_Kv_get__returns_single_value_for_single_key( mock_snapshot_read.side_effect = None mock_snapshot_read.return_value = Ok(read_output) - k, kval = await db.get(("a", 1)) + k, kval = await kv.get(("a", 1)) assert k == ("a", 1) assert kval is not None @@ -441,7 +483,7 @@ class ArgKind(StrEnum): @pytest.mark.parametrize("arg_kind", ArgKind) @pytest_mark_asyncio async def test_Kv_get__returns_n_values_for_n_keys( - n: int, arg_kind: ArgKind, db: Kv, mock_snapshot_read: AsyncMock + n: int, arg_kind: ArgKind, kv: Kv, mock_snapshot_read: AsyncMock ) -> None: read_output = SnapshotReadOutput( ranges=[ @@ -461,9 +503,9 @@ async def test_Kv_get__returns_n_values_for_n_keys( mock_snapshot_read.return_value = Ok(read_output) if arg_kind is ArgKind.KWARGS: - values = await db.get(keys=[("i", i) for i in range(n)]) + 
values = await kv.get(keys=[("i", i) for i in range(n)]) else: - values = await db.get(*[("i", i) for i in range(n)]) + values = await kv.get(*[("i", i) for i in range(n)]) assert isinstance(values, tuple) assert len(values) == n @@ -483,7 +525,7 @@ async def test_Kv_get__returns_n_values_for_n_keys( ) @pytest_mark_asyncio async def test_Kv_get__treats_int_as_float_when_IntAsNumber_enabled( - db: Kv, mock_snapshot_read: AsyncMock, int_type: type + kv: Kv, mock_snapshot_read: AsyncMock, int_type: type ) -> None: read_output = SnapshotReadOutput( ranges=[ReadRangeOutput(values=[pack_kv_entry(("a", int_type(1)), b"x")])], @@ -495,7 +537,7 @@ async def test_Kv_get__treats_int_as_float_when_IntAsNumber_enabled( mock_snapshot_read.side_effect = None mock_snapshot_read.return_value = Ok(read_output) - k, kval = await db.get(("a", 1)) + k, kval = await kv.get(("a", 1)) assert k == ("a", 1) # 1 == 1.0 assert type(k[1]) is int_type @@ -554,14 +596,14 @@ def retryable_errors( @given(data=st.data(), retry_delays=st.sampled_from([[], [1.0], [1.0, 2.0, 4.0]])) @pytest_mark_asyncio async def test_Kv_get__retries_retryable_snapshot_read_errors( - create_db: partial[Kv], + create_kv: partial[Kv], meta: DatabaseMetadata, mock_snapshot_read: AsyncMock, data: st.DataObject, retry_delays: Backoff, ) -> None: auth_fn = AsyncMock(name="auth_fn", return_value=Ok(meta)) - db = create_db(retry=retry_delays, auth=auth_fn) + db = create_kv(retry=retry_delays, auth=auth_fn) retry_errors: list[DataPathError] = [] def fail_with_retryable_error(*args: Any, **kwargs: Any) -> Err[DenoKvError]: @@ -598,17 +640,17 @@ def fail_with_retryable_error(*args: Any, **kwargs: Any) -> Err[DenoKvError]: @pytest_mark_asyncio -async def test_Kv_list__rejects_invalid_arguments(db: Kv) -> None: +async def test_Kv_list__rejects_invalid_arguments(kv: Kv) -> None: with pytest.raises(ValueError, match=r"limit cannot be negative"): - async for _ in db.list(limit=-1): + async for _ in kv.list(limit=-1): raise 
AssertionError("should not generate values") with pytest.raises(ValueError, match=r"batch_size cannot be < 1"): - async for _ in db.list(batch_size=0): + async for _ in kv.list(batch_size=0): raise AssertionError("should not generate values") with pytest.raises(InvalidCursor, match=r"cursor is not valid URL-safe base64"): - async for _ in db.list(cursor="x"): + async for _ in kv.list(cursor="x"): raise AssertionError("should not generate values") @@ -627,12 +669,12 @@ def pack_example_cursor(key: KvKeyTuple) -> str: ) @pytest_mark_asyncio async def test_Kv_list__rejects_cursor_outside_listed_range( - db: Kv, range_options: KvListOptions, cursor_key: KvKeyTuple + kv: Kv, range_options: KvListOptions, cursor_key: KvKeyTuple ) -> None: with pytest.raises( InvalidCursor, match=r"cursor is not within the the start and end key range" ): - async for _ in db.list( + async for _ in kv.list( **KvListOptions( **range_options, cursor_format_type=ExampleCursorFormat, @@ -642,11 +684,6 @@ async def test_Kv_list__rejects_cursor_outside_listed_range( raise AssertionError("should not generate values") -@pytest.fixture -def mock_db() -> MockKvDb: - return MockKvDb() - - @pytest.fixture def list_example_entries() -> Mapping[KvKeyTuple, object]: return { @@ -735,7 +772,7 @@ def list_example_cursors( @pytest_mark_asyncio async def test_Kv_list__generates_values_from_sequential_snapshot_reads( data: st.DataObject, - db: Kv, + kv: Kv, mock_snapshot_read_to_return_mock_db_results: Callable[[], AsyncMock], mock_db: MockKvDb, list_example_entries: Mapping[KvKeyTuple, object], @@ -784,7 +821,7 @@ async def test_Kv_list__generates_values_from_sequential_snapshot_reads( ] results: list[tuple[KvKeyTuple, object, VersionStamp]] = [] - async for kv_entry in db.list( + async for kv_entry in kv.list( prefix=prefix, start=start, end=end, @@ -823,7 +860,7 @@ async def test_Kv_list__generates_values_from_sequential_snapshot_reads( @pytest_mark_asyncio async def 
test_Kv_list__retries_retryable_snapshot_read_errors( - create_db: partial[Kv], + create_kv: partial[Kv], meta: DatabaseMetadata, mock_snapshot_read: AsyncMock, # mock_snapshot_read_to_return_mock_db_results: Callable[[], AsyncMock], @@ -831,7 +868,7 @@ async def test_Kv_list__retries_retryable_snapshot_read_errors( # list_example_entries: Mapping[KvKeyTuple, object], ) -> None: auth_fn = AsyncMock(name="auth_fn", return_value=Ok(meta)) - db = create_db(retry=repeat(0), auth=auth_fn) + db = create_kv(retry=repeat(0), auth=auth_fn) auth_fn.side_effect = [ Err(MetadataExchangeDenoKvError("Failed", retryable=True)), From 2a01ecb7cab9ba42ed8a3767e2d957100e694050 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Sun, 13 Oct 2024 08:27:29 +0000 Subject: [PATCH 14/52] test: replace deprecated body= arg for HTTP exceptions --- test/denokv_testing.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test/denokv_testing.py b/test/denokv_testing.py index 39cba31..103c55c 100644 --- a/test/denokv_testing.py +++ b/test/denokv_testing.py @@ -594,9 +594,9 @@ def validate_request(request: web.Request) -> None: server_version = get_server_version(request) if request.method != "POST": - raise web.HTTPBadRequest(body="method must be POST") + raise web.HTTPBadRequest(text="method must be POST") if request.content_type != "application/x-protobuf": - raise web.HTTPBadRequest(body="content-type must be application/x-protobuf") + raise web.HTTPBadRequest(text="content-type must be application/x-protobuf") db_id_header = ( "x-transaction-domain-id" if server_version == 1 else "x-denokv-database-id" @@ -605,7 +605,7 @@ def validate_request(request: web.Request) -> None: UUID(request.headers.get(db_id_header, "")) except Exception: raise web.HTTPBadRequest( - body=f"client did not set a valid {db_id_header} when talking to a " + text=f"client did not set a valid {db_id_header} when talking to a " f"v{server_version} server" ) from None @@ -616,7 +616,7 @@ def 
validate_request(request: web.Request) -> None: raise ValueError(f"invalid client_version: {client_version}") except Exception: raise web.HTTPBadRequest( - body=f"client did not set a valid x-denokv-version header when " + text=f"client did not set a valid x-denokv-version header when " f"talking to a v{server_version} server" ) from None @@ -633,7 +633,7 @@ def parse_protobuf_body( ) except Exception as e: raise web.HTTPBadRequest( - body=f"body is not a valid {message_type.__name__} message: {e}" + text=f"body is not a valid {message_type.__name__} message: {e}" ) from e return message @@ -662,7 +662,7 @@ async def atomic_write(request: web.Request) -> web.Response: try: write_result = mock_db.atomic_write(write) except ValueError as e: - raise web.HTTPBadRequest(body=f"SnapshotWrite is not valid: {e}") from e + raise web.HTTPBadRequest(text=f"SnapshotWrite is not valid: {e}") from e return web.Response( status=200, From f539d8fc204ca4963895b32aed3c3665ea0060d4 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Sun, 13 Oct 2024 08:36:45 +0000 Subject: [PATCH 15/52] refactor: move make_database_metadata_for_endpoint Now in denokv_testing so use it in test_kv. 
--- test/denokv_testing.py | 27 +++++++++++++++++++++++++++ test/test_datapath.py | 26 +------------------------- 2 files changed, 28 insertions(+), 25 deletions(-) diff --git a/test/denokv_testing.py b/test/denokv_testing.py index 103c55c..60b3551 100644 --- a/test/denokv_testing.py +++ b/test/denokv_testing.py @@ -21,6 +21,7 @@ from fdb.tuple import pack from fdb.tuple import unpack from google.protobuf.message import Message +from yarl import URL from denokv._datapath_pb2 import AtomicWrite from denokv._datapath_pb2 import AtomicWriteOutput @@ -53,6 +54,7 @@ from denokv._pycompat.typing import TypeIs from denokv._pycompat.typing import TypeVar from denokv._pycompat.typing import cast +from denokv.auth import ConsistencyLevel from denokv.auth import DatabaseMetadata from denokv.auth import EndpointInfo from denokv.datapath import AnyKvKey @@ -682,6 +684,31 @@ async def atomic_write(request: web.Request) -> web.Response: return app +def make_database_metadata_for_endpoint( + endpoint_url: URL, + endpoint_consistency: ConsistencyLevel = ConsistencyLevel.STRONG, + version: Literal[1, 2, 3] = 3, + database_id: UUID | None = None, + expires_at: datetime | None = None, + token: str = "hunter2.123", +) -> tuple[DatabaseMetadata, EndpointInfo]: + if database_id is None: + database_id = UUID("00000000-0000-0000-0000-000000000000") + if expires_at is None: + expires_at = datetime.now() + timedelta(minutes=30) + + endpoint = EndpointInfo(url=endpoint_url, consistency=endpoint_consistency) + + meta = DatabaseMetadata( + version=version, + database_id=database_id, + endpoints=[endpoint], + expires_at=expires_at, + token=token, + ) + return meta, endpoint + + def add_entries( db: MockKvDb, entries: Mapping[KvKeyTuple, object] | Iterable[tuple[KvKeyTuple, object]], diff --git a/test/test_datapath.py b/test/test_datapath.py index 9b76dc6..b32b304 100644 --- a/test/test_datapath.py +++ b/test/test_datapath.py @@ -80,6 +80,7 @@ from denokv.result import is_ok from 
test.denokv_testing import MockKvDb from test.denokv_testing import add_entries +from test.denokv_testing import make_database_metadata_for_endpoint from test.denokv_testing import mock_db_api from test.denokv_testing import nextafter from test.denokv_testing import unsafe_parse_protobuf_kv_entry @@ -386,31 +387,6 @@ async def client( return await aiohttp_client(db_api) -def make_database_metadata_for_endpoint( - endpoint_url: URL, - endpoint_consistency: ConsistencyLevel = ConsistencyLevel.STRONG, - version: Literal[1, 2, 3] = 3, - database_id: UUID | None = None, - expires_at: datetime | None = None, - token: str = "hunter2.123", -) -> tuple[DatabaseMetadata, EndpointInfo]: - if database_id is None: - database_id = UUID("00000000-0000-0000-0000-000000000000") - if expires_at is None: - expires_at = datetime.now() + timedelta(minutes=30) - - endpoint = EndpointInfo(url=endpoint_url, consistency=endpoint_consistency) - - meta = DatabaseMetadata( - version=version, - database_id=database_id, - endpoints=[endpoint], - expires_at=expires_at, - token=token, - ) - return meta, endpoint - - @pytest.mark.parametrize( "datapath_request_fn", [ From c0e429a2d17aae92746cc3dbcb3303376def8790 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Sun, 13 Oct 2024 08:55:22 +0000 Subject: [PATCH 16/52] refactor: generalise make_database_metadata_for_endpoint It's now make_database_metadata() and a separate meta_endpoint() fn to unpack it. We'll use make_database_metadata() to replace the similar and overlapping mk_db_meta() function. 
--- test/denokv_testing.py | 29 +++++++++++++++++++++-------- test/test_datapath.py | 28 +++++++++++++--------------- 2 files changed, 34 insertions(+), 23 deletions(-) diff --git a/test/denokv_testing.py b/test/denokv_testing.py index 60b3551..f9f1034 100644 --- a/test/denokv_testing.py +++ b/test/denokv_testing.py @@ -684,29 +684,42 @@ async def atomic_write(request: web.Request) -> web.Response: return app -def make_database_metadata_for_endpoint( - endpoint_url: URL, - endpoint_consistency: ConsistencyLevel = ConsistencyLevel.STRONG, +def make_database_metadata( + endpoints: URL | Sequence[EndpointInfo], + *, + endpoint_consistency: ConsistencyLevel | None = None, version: Literal[1, 2, 3] = 3, database_id: UUID | None = None, expires_at: datetime | None = None, token: str = "hunter2.123", -) -> tuple[DatabaseMetadata, EndpointInfo]: +) -> DatabaseMetadata: + if isinstance(endpoints, URL): + if endpoint_consistency is None: + endpoint_consistency = ConsistencyLevel.STRONG + endpoints = [EndpointInfo(url=endpoints, consistency=endpoint_consistency)] + else: + if endpoint_consistency is not None: + raise TypeError( + "cannot set endpoint_consistency argument wen endpoints is a Sequence" + ) + if database_id is None: database_id = UUID("00000000-0000-0000-0000-000000000000") if expires_at is None: expires_at = datetime.now() + timedelta(minutes=30) - endpoint = EndpointInfo(url=endpoint_url, consistency=endpoint_consistency) - meta = DatabaseMetadata( version=version, database_id=database_id, - endpoints=[endpoint], + endpoints=endpoints, expires_at=expires_at, token=token, ) - return meta, endpoint + return meta + + +def meta_endpoint(meta: DatabaseMetadata) -> tuple[DatabaseMetadata, EndpointInfo]: + return meta, meta.endpoints[0] def add_entries( diff --git a/test/test_datapath.py b/test/test_datapath.py index b32b304..99087e1 100644 --- a/test/test_datapath.py +++ b/test/test_datapath.py @@ -3,10 +3,7 @@ import functools import re import struct -from datetime 
import datetime -from datetime import timedelta from typing import Literal -from uuid import UUID import pytest import pytest_asyncio @@ -43,14 +40,12 @@ from denokv._kv_values import VersionStamp from denokv._pycompat.typing import Awaitable from denokv._pycompat.typing import Callable -from denokv._pycompat.typing import Final from denokv._pycompat.typing import Mapping from denokv._pycompat.typing import Sequence from denokv._pycompat.typing import TypeAlias from denokv._pycompat.typing import TypeVar from denokv._pycompat.typing import cast from denokv.auth import ConsistencyLevel -from denokv.auth import DatabaseMetadata from denokv.auth import EndpointInfo from denokv.datapath import KV_KEY_PIECE_TYPES from denokv.datapath import AutoRetry @@ -80,7 +75,8 @@ from denokv.result import is_ok from test.denokv_testing import MockKvDb from test.denokv_testing import add_entries -from test.denokv_testing import make_database_metadata_for_endpoint +from test.denokv_testing import make_database_metadata +from test.denokv_testing import meta_endpoint from test.denokv_testing import mock_db_api from test.denokv_testing import nextafter from test.denokv_testing import unsafe_parse_protobuf_kv_entry @@ -407,7 +403,7 @@ async def test_datapath_request_function__handles_network_error( server_url = client.make_url("/") server_url = server_url.with_port(unused_tcp_port_factory()) - meta, endpoint = make_database_metadata_for_endpoint(endpoint_url=server_url) + meta, endpoint = meta_endpoint(make_database_metadata(endpoints=server_url)) # will fail to connect to URL with nothing listening on the port result = await datapath_request_fn( @@ -554,7 +550,7 @@ async def test_snapshot_read__handles_unsuccessful_responses( mk_error: Callable[[EndpointInfo], DataPathDenoKvError], ) -> None: server_url = client.make_url(path) - meta, endpoint = make_database_metadata_for_endpoint(endpoint_url=server_url) + meta, endpoint = meta_endpoint(make_database_metadata(endpoints=server_url)) 
error = mk_error(endpoint) assert isinstance(error, DataPathDenoKvError) read = SnapshotRead(ranges=[]) @@ -683,8 +679,8 @@ async def test_snapshot_read__reads_expected_values( version: Literal[1, 2, 3], ) -> None: server_url = client.make_url(f"/v{version}/consistency/strong/") - meta, endpoint = make_database_metadata_for_endpoint( - endpoint_url=server_url, version=version + meta, endpoint = meta_endpoint( + make_database_metadata(endpoints=server_url, version=version) ) ver = add_entries(mock_db, example_entries) @@ -714,8 +710,10 @@ async def test_atomic_write__raises_when_given_endpoint_without_strong_consisten client: TestClient, ) -> None: # this is considered an avoidable programmer error, so it raises - meta, eventual_endpoint = make_database_metadata_for_endpoint( - URL("https://example/"), endpoint_consistency=ConsistencyLevel.EVENTUAL + meta, eventual_endpoint = meta_endpoint( + make_database_metadata( + URL("https://example/"), endpoint_consistency=ConsistencyLevel.EVENTUAL + ) ) with pytest.raises( ValueError, @@ -825,7 +823,7 @@ async def test_atomic_write__handles_unsuccessful_responses( mk_error: Callable[[EndpointInfo], DataPathDenoKvError], ) -> None: server_url = client.make_url(path) - meta, endpoint = make_database_metadata_for_endpoint(endpoint_url=server_url) + meta, endpoint = meta_endpoint(make_database_metadata(endpoints=server_url)) error = mk_error(endpoint) assert isinstance(error, DataPathDenoKvError) @@ -910,8 +908,8 @@ async def test_atomic_write__writes_expected_values( version: Literal[1, 2, 3], ) -> None: server_url = client.make_url(f"/v{version}/consistency/strong/") - meta, endpoint = make_database_metadata_for_endpoint( - endpoint_url=server_url, version=version + meta, endpoint = meta_endpoint( + make_database_metadata(endpoints=server_url, version=version) ) add_entries(mock_db, example_entries_write) From 7b99acd67c9b51a549745cadb26fdca70a530224 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Sun, 13 Oct 2024 09:01:48 
+0000 Subject: [PATCH 17/52] refactor: remove mk_db_meta() make_database_metadata() covers its use-case now. --- test/denokv_testing.py | 11 ----------- test/test_kv.py | 8 ++++---- 2 files changed, 4 insertions(+), 15 deletions(-) diff --git a/test/denokv_testing.py b/test/denokv_testing.py index f9f1034..d809093 100644 --- a/test/denokv_testing.py +++ b/test/denokv_testing.py @@ -107,17 +107,6 @@ def assume_err(result: Result[T, E], type: type[E2] | None = None) -> E | E2: ) -def mk_db_meta(endpoints: Sequence[EndpointInfo]) -> DatabaseMetadata: - """Create a placeholder DB meta object with the provided endpoints.""" - return DatabaseMetadata( - version=3, - database_id=UUID("00000000-0000-0000-0000-000000000000"), - expires_at=datetime.now() + timedelta(hours=1), - endpoints=[*endpoints], - token="secret", - ) - - @dataclass(**slots_if310(), frozen=True) class KvWriteValue: data: bytes diff --git a/test/test_kv.py b/test/test_kv.py index 08af41d..897c7ea 100644 --- a/test/test_kv.py +++ b/test/test_kv.py @@ -93,7 +93,7 @@ from test.denokv_testing import MockKvDb from test.denokv_testing import add_entries from test.denokv_testing import assume_ok -from test.denokv_testing import mk_db_meta +from test.denokv_testing import make_database_metadata from test.denokv_testing import mock_db_api from test.denokv_testing import unsafe_parse_protobuf_kv_entry @@ -103,7 +103,7 @@ def test_EndpointSelector__rejects_meta_without_strong_endpoint() -> None: - meta_no_strong = mk_db_meta( + meta_no_strong = make_database_metadata( [ EndpointInfo( url=URL("https://example.com/eventual/"), @@ -117,7 +117,7 @@ def test_EndpointSelector__rejects_meta_without_strong_endpoint() -> None: def test_EndpointSelector__single() -> None: - meta = mk_db_meta( + meta = make_database_metadata( [ endpoint := EndpointInfo( url=URL("https://example.com/"), consistency=ConsistencyLevel.STRONG @@ -131,7 +131,7 @@ def test_EndpointSelector__single() -> None: def test_EndpointSelector__multi() -> 
None: - meta = mk_db_meta( + meta = make_database_metadata( [ endpoint_eventual := EndpointInfo( url=URL("https://example.com/eventual/"), From 659ad69fa16a9e7dabbe7b9538db6c46295b70bf Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Sun, 2 Feb 2025 01:59:19 +0000 Subject: [PATCH 18/52] refactor: use mock db via datapath to test Kv.list() We were mocking the snapshot_read() function to return results from the mock db implementation, but now we access the mock db via the unmocked datapath module's HTTP requests to an aiohttp mock HTTP server. --- test/test_kv.py | 42 +++++++----------------------------------- 1 file changed, 7 insertions(+), 35 deletions(-) diff --git a/test/test_kv.py b/test/test_kv.py index 897c7ea..a6e7f93 100644 --- a/test/test_kv.py +++ b/test/test_kv.py @@ -66,7 +66,6 @@ from denokv.datapath import KvKeyTuple from denokv.datapath import RequestUnsuccessful from denokv.datapath import ResponseUnsuccessful -from denokv.datapath import SnapshotReadResult from denokv.datapath import increment_packed_key from denokv.datapath import pack_key from denokv.errors import DenoKvError @@ -348,8 +347,13 @@ def datapath_endpoint_url( @pytest.fixture -def meta(datapath_endpoint_url: URL) -> DatabaseMetadata: - return mk_db_meta([EndpointInfo(datapath_endpoint_url, ConsistencyLevel.STRONG)]) +def meta( + datapath_version: Literal[1, 2, 3], datapath_endpoint_url: URL +) -> DatabaseMetadata: + return make_database_metadata( + [EndpointInfo(datapath_endpoint_url, ConsistencyLevel.STRONG)], + version=datapath_version, + ) @pytest.fixture @@ -693,33 +697,6 @@ def list_example_entries() -> Mapping[KvKeyTuple, object]: } -@pytest.fixture -def mock_snapshot_read_to_return_mock_db_results( - mock_snapshot_read: AsyncMock, mock_db: MockKvDb -) -> Callable[[], AsyncMock]: - async def snapshot_read_effect( - *, - session: aiohttp.ClientSession, - meta: DatabaseMetadata, - endpoint: EndpointInfo, - read: SnapshotRead, - ) -> SnapshotReadResult: - assert len(read.ranges) 
== 1 - snapshot_read_output = SnapshotReadOutput( - ranges=[mock_db.snapshot_read_range(read.ranges[0])], - read_disabled=False, - read_is_strongly_consistent=True, - status=SnapshotReadStatus.SR_SUCCESS, - ) - return Ok(snapshot_read_output) - - def apply() -> AsyncMock: - mock_snapshot_read.side_effect = snapshot_read_effect - return mock_snapshot_read - - return apply - - list_example_keys = st.one_of( st.none(), st.just(()), @@ -773,7 +750,6 @@ def list_example_cursors( async def test_Kv_list__generates_values_from_sequential_snapshot_reads( data: st.DataObject, kv: Kv, - mock_snapshot_read_to_return_mock_db_results: Callable[[], AsyncMock], mock_db: MockKvDb, list_example_entries: Mapping[KvKeyTuple, object], prefix: KvKeyTuple | None, @@ -786,7 +762,6 @@ async def test_Kv_list__generates_values_from_sequential_snapshot_reads( ) -> None: mock_db.clear() add_entries(mock_db, list_example_entries) - mock_snapshot_read_to_return_mock_db_results() # Kv.list() should be equivalent to reading the listed range in one go. listed_range = datapath.read_range_multi( @@ -863,9 +838,6 @@ async def test_Kv_list__retries_retryable_snapshot_read_errors( create_kv: partial[Kv], meta: DatabaseMetadata, mock_snapshot_read: AsyncMock, - # mock_snapshot_read_to_return_mock_db_results: Callable[[], AsyncMock], - # mock_db: MockKvDb, - # list_example_entries: Mapping[KvKeyTuple, object], ) -> None: auth_fn = AsyncMock(name="auth_fn", return_value=Ok(meta)) db = create_kv(retry=repeat(0), auth=auth_fn) From dd5344169bec7360e175ba78b44ced7d623d247f Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Mon, 14 Oct 2024 02:55:32 +0000 Subject: [PATCH 19/52] test: use strict BigInt/Number V8 decoder in MockKvDb We now use a customised V8 decoder for the MockKvDb API that mirrors the default encoder used by Kv (that encodes int as BigInt): it always decodes int32 values as float rather than the v8serialize default of decoding int32 as int and Double as float. 
We don't rely on this being in place as we currently encode all int32 range values as Double (which are always decoded as float), but it seems prudent to ensure that we don't mistakenly treat small ints as BigInt instead of Number. --- test/denokv_testing.py | 37 ++++++++++++++++++++++++++++++++----- 1 file changed, 32 insertions(+), 5 deletions(-) diff --git a/test/denokv_testing.py b/test/denokv_testing.py index d809093..0f0e979 100644 --- a/test/denokv_testing.py +++ b/test/denokv_testing.py @@ -21,6 +21,9 @@ from fdb.tuple import pack from fdb.tuple import unpack from google.protobuf.message import Message +from v8serialize.constants import SerializationTag +from v8serialize.decode import DecodeContext +from v8serialize.decode import DecodeNextFn from yarl import URL from denokv._datapath_pb2 import AtomicWrite @@ -79,7 +82,27 @@ E2 = TypeVar("E2") MessageT = TypeVar("MessageT", bound=Message) -v8_decoder = v8serialize.Decoder() + +def decode_js_number_as_float( + tag: SerializationTag, /, ctx: DecodeContext, next: DecodeNextFn +) -> object: + if tag in { + SerializationTag.kInt32, + SerializationTag.kDouble, + SerializationTag.kUint32, + SerializationTag.kNumberObject, + }: + number = next(tag) + if isinstance(number, int): + return float(number) + return number + return next(tag) + + +v8_bigint_decoder = v8serialize.Decoder( + decode_steps=[decode_js_number_as_float, *v8serialize.default_decode_steps] +) +"""Decodes JS Number as float and BigInt as int.""" v8_bigint_encoder = create_default_v8_encoder() @@ -513,7 +536,7 @@ def decode_number_value( def decode_v8_number(data: bytes) -> int | float: try: - value = v8_decoder.decodes(data) + value = v8_bigint_decoder.decodes(data) except v8serialize.V8SerializeError as e: raise ValueError("data is not a valid V8-serialized value") from e if not isinstance(value, (int, float)): @@ -552,7 +575,7 @@ def encode_kv_write_value(value: object, expires_at_ms: int = 0) -> KvWriteValue def decode_enqueue_message(enqueue: 
Enqueue) -> MockKvDbMessage:
     try:
-        payload_value = v8_decoder.decodes(enqueue.payload)
+        payload_value = v8_bigint_decoder.decodes(enqueue.payload)
     except v8serialize.V8SerializeError as e:
         raise ValueError("Enqueue payload is not a valid V8-encoded value") from e
     keys_if_undelivered = list[KvKey]()
@@ -726,9 +749,13 @@ def add_entries(
     return version


-def unsafe_parse_protobuf_kv_entry(raw: ProtobufKvEntry) -> KvEntry:
+def unsafe_parse_protobuf_kv_entry(
+    raw: ProtobufKvEntry, v8_decoder: v8serialize.Decoder | None = None
+) -> KvEntry:
+    if v8_decoder is None:
+        v8_decoder = v8_bigint_decoder
     key, value, versionstamp = assume_ok(
-        parse_protobuf_kv_entry(raw, v8_decoder=v8_decoder, le64_type=KvU64)
+        parse_protobuf_kv_entry(raw, v8_decoder=v8_decoder, le64_type=KvU64)
     )
     return KvEntry(KvKey.wrap_tuple_keys(key), value, VersionStamp(versionstamp))


From 6ec537e143f5042375bfe9795b94bea392034dd0 Mon Sep 17 00:00:00 2001
From: Hal Blackburn
Date: Sun, 13 Oct 2024 15:01:45 +0000
Subject: [PATCH 20/52] feat: support is_ok/is_err for Kv write() result type

CommittedWrite satisfies is_ok() and ConflictedWrite satisfies is_err().
--- src/denokv/_kv_writes.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/denokv/_kv_writes.py b/src/denokv/_kv_writes.py index 8de85e2..0359839 100644 --- a/src/denokv/_kv_writes.py +++ b/src/denokv/_kv_writes.py @@ -31,6 +31,7 @@ from denokv._pycompat.typing import Container from denokv._pycompat.typing import Mapping from denokv._pycompat.typing import MutableSequence +from denokv._pycompat.typing import Never from denokv._pycompat.typing import Protocol from denokv._pycompat.typing import Self from denokv._pycompat.typing import Sequence @@ -44,6 +45,8 @@ from denokv.datapath import AnyKvKey from denokv.datapath import pack_key from denokv.kv_keys import KvKey +from denokv.result import AnyFailure +from denokv.result import AnySuccess def encode_kv_write_value(value: object, *, v8_encoder: Encoder) -> dp_protobuf.KvValue: @@ -211,7 +214,11 @@ def enqueue( @dataclass(init=False, **slots_if310()) -class ConflictedWrite(FrozenAfterInitDataclass): +class ConflictedWrite(FrozenAfterInitDataclass, AnyFailure): + if TYPE_CHECKING: + + def _AnyFailure_marker(self, no_call: Never) -> Never: ... + ok: Literal[False] conflicts: Mapping[AnyKvKey, Check] versionstamp: None @@ -240,7 +247,11 @@ def __init__( @dataclass(init=False, **slots_if310()) -class CommittedWrite(FrozenAfterInitDataclass): +class CommittedWrite(FrozenAfterInitDataclass, AnySuccess): + if TYPE_CHECKING: + + def _AnySuccess_marker(self, no_call: Never) -> Never: ... + ok: Literal[True] conflicts: Mapping[KvKey, Check] # empty versionstamp: VersionStamp From 1c205a89d1377d541c66db8a85a0fbb42878de7a Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Mon, 14 Oct 2024 05:15:08 +0000 Subject: [PATCH 21/52] feat: include error details in ResponseUnsuccessful str The ResponseUnsuccessful exception held details of the error message from the db server, but was not showing it in the default string representation; now it does. 
--- src/denokv/datapath.py | 6 ++++++ test/test_datapath.py | 17 +++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/src/denokv/datapath.py b/src/denokv/datapath.py index 512cdae..a371b85 100644 --- a/src/denokv/datapath.py +++ b/src/denokv/datapath.py @@ -173,6 +173,12 @@ def __init__( self.status = status self.body_text = body_text + def __str__(self) -> str: + return ( + f"{super().__str__()}: HTTP response: status={self.status}, " + f"body_text={self.body_text!r}" + ) + class RequestUnsuccessful(DataPathDenoKvError): """Unable to make a Data Path request to the KV server.""" diff --git a/test/test_datapath.py b/test/test_datapath.py index 99087e1..c4fc72d 100644 --- a/test/test_datapath.py +++ b/test/test_datapath.py @@ -1408,3 +1408,20 @@ def test_CheckFailure__validates_constructor_args( failed_check_indexes=[5], endpoint=example_endpoint, ) + + +def test_ResponseUnsuccessful(example_endpoint: EndpointInfo) -> None: + msg = "Server rejected Data Path request indicating client error" + response_body = "Info about what is wrong." + status = 400 + e = ResponseUnsuccessful( + msg, + status=status, + body_text=response_body, + endpoint=example_endpoint, + auto_retry=AutoRetry.NEVER, + ) + assert str(msg) in str(e) + assert str(status) in str(e) + assert str(response_body) in str(e) + assert e.endpoint is example_endpoint From c2d4a490063261a72871bcfb67df23d675a54407 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Sat, 19 Oct 2024 04:49:23 +0000 Subject: [PATCH 22/52] fix: don't use BaseException for DenoKvError DenoKvError was extending BaseException instead of Exception, which meant that catch-all Exception handlers didn't catch it. 
--- src/denokv/errors.py | 2 +- test/test_errors.py | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 test/test_errors.py diff --git a/src/denokv/errors.py b/src/denokv/errors.py index 83734a0..4326423 100644 --- a/src/denokv/errors.py +++ b/src/denokv/errors.py @@ -4,7 +4,7 @@ @dataclass(init=False) -class DenoKvError(BaseException): +class DenoKvError(Exception): message: str def __init__(self, message: str, *args: object) -> None: diff --git a/test/test_errors.py b/test/test_errors.py new file mode 100644 index 0000000..031bc42 --- /dev/null +++ b/test/test_errors.py @@ -0,0 +1,9 @@ +import pytest + +from denokv.errors import DenoKvError + + +def test_errors_are_regular_exceptions() -> None: + """Errors must be caught by generic Exception handlers — not BaseException.""" + with pytest.raises(Exception): # noqa: B017 + raise DenoKvError("error") From a040e1fbf9bd6648e9eadf720d3e0dc35b8b712d Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Sat, 26 Oct 2024 03:24:32 +0000 Subject: [PATCH 23/52] feat: make ConsistencyLevel enum ordered --- src/denokv/auth.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/src/denokv/auth.py b/src/denokv/auth.py index a18219e..1e460eb 100644 --- a/src/denokv/auth.py +++ b/src/denokv/auth.py @@ -1,5 +1,6 @@ from __future__ import annotations +import functools from dataclasses import dataclass from datetime import datetime from uuid import UUID @@ -54,10 +55,27 @@ class DatabaseMetadata: expires_at: datetime +@functools.total_ordering class ConsistencyLevel(StrEnum): + """ + A read consistency requirement for a Deno KV Database server endpoint. + + Examples + -------- + Levels are ordered by amount of consistency — strong greater than eventual. 
+ + >>> assert ConsistencyLevel.STRONG > ConsistencyLevel.EVENTUAL + >>> assert ConsistencyLevel.EVENTUAL < ConsistencyLevel.STRONG + """ + STRONG = "strong" EVENTUAL = "eventual" + def __lt__(self, value: object) -> bool: + if not isinstance(value, ConsistencyLevel): + return NotImplemented + return self is ConsistencyLevel.EVENTUAL and value is ConsistencyLevel.STRONG + @dataclass(frozen=True, **slots_if310()) class EndpointInfo: From ee0bd7077ae189bedbc018310ed7ac881abb524e Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Sun, 2 Feb 2025 15:27:34 +0000 Subject: [PATCH 24/52] test: provide assertion errors for protobuf Message We now provide a custom assertion failure message for comparisons involving two protobuf Message objects. The property values are diffed, similar to how pytest diffs dicts, etc. --- test/conftest.py | 50 ++++++++++++++++++++++++++++++++++++++++++ test/denokv_testing.py | 28 +++++++++++++++++++++++ 2 files changed, 78 insertions(+) diff --git a/test/conftest.py b/test/conftest.py index e6c8d8a..9cfde92 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -1,3 +1,53 @@ +from __future__ import annotations + +from google.protobuf.message import Message +from pytest import Config + +from denokv._pycompat.typing import Sequence from test import advance_time +from test.denokv_testing import diff_protobuf_messages advance_time_time = advance_time.advance_time_time + +_pytest_assertion_verbosity: int = 0 + + +def pytest_configure(config: Config) -> None: + global _pytest_assertion_verbosity + _pytest_assertion_verbosity = config.get_verbosity(Config.VERBOSITY_ASSERTIONS) + + +# Provide descriptive diffs for failed protobuf message equality assertions +def pytest_assertrepr_compare( + op: str, left: object, right: object +) -> Sequence[str] | None: + if isinstance(left, Message) and isinstance(right, Message): + repr_left = f"<{left.DESCRIPTOR.full_name} protobuf message at {hex(id(left))}>" + repr_right = ( + f"<{right.DESCRIPTOR.full_name} 
protobuf message at {hex(id(right))}>" + ) + comparison = [f"{repr_left} {op} {repr_right}"] + + if left == right: + comparison.append("Protobuf messages are equal") + return comparison + if type(left) is not type(right): + comparison.append("Protobuf messages are different types") + return comparison + + end = ( + " (use -v for diff)" + if _pytest_assertion_verbosity == 0 + else " (repeat -v for more context):" + ) + comparison.append(f"Protobuf messages are not equal{end}") + if _pytest_assertion_verbosity == 0: + return comparison + + # Scale context lines with verbosity level: 1=3, 2=9, 3=27, 4=81, 5=243 + context = 3**_pytest_assertion_verbosity + comparison.extend( + diff_protobuf_messages(left, right, context_line_count=context, lineterm="") + ) + return comparison + return None diff --git a/test/denokv_testing.py b/test/denokv_testing.py index 0f0e979..f03aa99 100644 --- a/test/denokv_testing.py +++ b/test/denokv_testing.py @@ -1,5 +1,6 @@ from __future__ import annotations +import difflib import math import re import sys @@ -83,6 +84,33 @@ MessageT = TypeVar("MessageT", bound=Message) +def diff_protobuf_messages( + left: Message, + right: Message, + *, + left_name: str | None = None, + right_name: str | None = None, + context_line_count: int = 3, + lineterm: str = "\n", +) -> Sequence[str]: + if left_name is None: + left_name = f"left: {left.DESCRIPTOR.full_name}" + if right_name is None: + right_name = f"right: {right.DESCRIPTOR.full_name}" + left_lines = str(left).splitlines(keepends=False) + right_lines = str(right).splitlines(keepends=False) + return [ + *difflib.unified_diff( + left_lines, + right_lines, + fromfile=left_name, + tofile=right_name, + n=context_line_count, + lineterm=lineterm, + ) + ] + + def decode_js_number_as_float( tag: SerializationTag, /, ctx: DecodeContext, next: DecodeNextFn ) -> object: From 4069c9bd9874bb76cf9feade724601e4428348a5 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Sat, 28 Dec 2024 08:18:00 +0000 Subject: [PATCH 
25/52] test: add mocked(...) helper to access Mock methods --- test/denokv_testing.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/test/denokv_testing.py b/test/denokv_testing.py index f03aa99..ec2eed9 100644 --- a/test/denokv_testing.py +++ b/test/denokv_testing.py @@ -14,6 +14,7 @@ from itertools import groupby from typing import Literal from typing import overload +from unittest.mock import Mock from uuid import UUID import v8serialize @@ -84,6 +85,12 @@ MessageT = TypeVar("MessageT", bound=Message) +def mocked(mocked_value: object) -> Mock: + """Type-safely cast `mocked_value` to a Mock.""" + assert isinstance(mocked_value, Mock) + return mocked_value + + def diff_protobuf_messages( left: Message, right: Message, From 18896ce9fce898d503e150421bf36deeeb35571b Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Sun, 2 Feb 2025 15:29:08 +0000 Subject: [PATCH 26/52] chore: support __notes__ < py3.11 We'll add __notes__ to exceptions in older versions, but in practice they won't be visible in 3.9's tracebacks. 
Based on: https://github.com/h4l/v8serialize/commit/dec9d82a8df6d94eed42c9a5e6ca029bf41954f4 --- src/denokv/_pycompat/exceptions.py | 34 ++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 src/denokv/_pycompat/exceptions.py diff --git a/src/denokv/_pycompat/exceptions.py b/src/denokv/_pycompat/exceptions.py new file mode 100644 index 0000000..57b9447 --- /dev/null +++ b/src/denokv/_pycompat/exceptions.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from denokv._pycompat.typing import Protocol +from denokv._pycompat.typing import TypeGuard +from denokv._pycompat.typing import TypeVar +from denokv._pycompat.typing import cast + + +class Notes(Protocol): + __notes__: list[str] + + +def has_notes(exc: BaseException) -> TypeGuard[Notes]: + return isinstance(getattr(exc, "__notes__", None), list) + + +def add_note(exc: BaseException, note: str) -> None: + if not isinstance(note, str): + raise TypeError("note must be a str") + if not has_notes(exc): + exc_with_notes = cast(Notes, exc) + exc_with_notes.__notes__ = notes = [] + else: + notes = exc.__notes__ + notes.append(note) + + +ExceptionT = TypeVar("ExceptionT", bound=BaseException) + + +def with_notes(exc: ExceptionT, *notes: str) -> ExceptionT: + for note in notes: + add_note(exc, note) + return exc From 66d67751e1cdab6790ff21aeec14e7636024678f Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Thu, 16 Jan 2025 02:44:24 +0000 Subject: [PATCH 27/52] feat: support copying notes between exceptions The with_notes() Exception util function now supports a from_exception argument, to copy the notes from another exception to the subject. 
--- src/denokv/_pycompat/exceptions.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/denokv/_pycompat/exceptions.py b/src/denokv/_pycompat/exceptions.py index 57b9447..acfd02a 100644 --- a/src/denokv/_pycompat/exceptions.py +++ b/src/denokv/_pycompat/exceptions.py @@ -28,7 +28,12 @@ def add_note(exc: BaseException, note: str) -> None: ExceptionT = TypeVar("ExceptionT", bound=BaseException) -def with_notes(exc: ExceptionT, *notes: str) -> ExceptionT: +def with_notes( + exc: ExceptionT, *notes: str, from_exception: BaseException | None = None +) -> ExceptionT: + if from_exception and has_notes(from_exception): + for note in from_exception.__notes__: + add_note(exc, note) for note in notes: add_note(exc, note) return exc From 4c3d945e7c5b89cfd1d85f77f21f4e968fcbb7af Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Mon, 30 Dec 2024 15:50:37 +0000 Subject: [PATCH 28/52] chore: upgrade v8serialize to 0.2.0 (alpha) This version supports a JSBigInt type, to disambiguate int between bigint and number. --- poetry.lock | 8 ++++---- pyproject.toml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/poetry.lock b/poetry.lock index 97f3665..5c55821 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2919,13 +2919,13 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "v8serialize" -version = "0.1.0" +version = "0.2.0a0" description = "Read & write JavaScript values from Python with the V8 serialization format." 
optional = false python-versions = "<4.0,>=3.9" files = [ - {file = "v8serialize-0.1.0-py3-none-any.whl", hash = "sha256:5136e50c24308f9ddc7b8083ca34e7c65f57cd321dd703b9667708a8552eebed"}, - {file = "v8serialize-0.1.0.tar.gz", hash = "sha256:bd330fb925be9c395d82ed4f048b78f0d560d4358b3061d149665d8a8cc60d86"}, + {file = "v8serialize-0.2.0a0-py3-none-any.whl", hash = "sha256:643557ed38757a5ddaac008469bc87c47e1e89df062941ce133323e60122f4f0"}, + {file = "v8serialize-0.2.0a0.tar.gz", hash = "sha256:57dc3262a089ba5917da4d53300fa392e50e19ab2476ef3e507d6061f70ee332"}, ] [package.dependencies] @@ -3098,4 +3098,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.0" python-versions = "^3.9" -content-hash = "4581fc6eb84b066a30991807676d038a115ea4e6a24093059d9dc46f2cab1bed" +content-hash = "eceacf465958af528559937236cb0ab10ae6d4a8d492c1e075650c451a85b5db" diff --git a/pyproject.toml b/pyproject.toml index ac52135..1fc3476 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,7 +25,7 @@ protobuf = ">=4.22.0,<6" # change was in 6.2.4 (which is late 2019) # https://github.com/apple/foundationdb/commits/main/bindings/python/fdb/tuple.py foundationdb = ">=6.2.4,<8" -v8serialize = "^0.1.0" +v8serialize = "^0.2.0-alpha.0" [tool.poetry.group.dev.dependencies] pytest = "^8.3.2" From 5831fb166c56da90dd565d96ddcd6b217b29e023 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Tue, 7 Jan 2025 08:18:19 +0000 Subject: [PATCH 29/52] refactor!: remove customised v8serialize Encoder denokv no longer customises the v8serialize Encoder/Decoder. Previously it changed the encoding behaviour to always treat int as bigint. This was necessary because v8serialize did not have a specific type for bigint, so int would get encoded as float (JS Number) or bigint depending on the size of the int, so there was no way to encode a small int as a bigint. v8serialize now includes a JSBigInt type which solves this problem. 
--- src/denokv/kv.py | 23 +---------------------- test/conftest.py | 7 +++++++ test/denokv_testing.py | 18 +++++++----------- test/test_datapath.py | 13 +++++++------ 4 files changed, 22 insertions(+), 39 deletions(-) diff --git a/src/denokv/kv.py b/src/denokv/kv.py index 9b237fb..2093843 100644 --- a/src/denokv/kv.py +++ b/src/denokv/kv.py @@ -108,27 +108,6 @@ def v8_encode_int_as_bigint( next(value) -# TODO: add an explicit tagged JSBigInt type to v8serialize -def create_default_v8_encoder() -> v8serialize.Encoder: - """ - Create a new V8-serialization format Encoder. - - This encoder always encodes int as JavaScript BigInt. We use this by default - for Kv instances to ensure consistent handling of int and float types. - - Notes - ----- - In contrast, the `v8serialize` default encoder encodes int as Number when it - fits in the +/- 2**53 - 1 range which float64 can represent exactly. This - results in differing number representation for different number sizes, which - is likely to be a footgun in the context of the Sum/Min/Max write - operations. 
- """ - return v8serialize.Encoder( - encode_steps=[v8_encode_int_as_bigint, *v8serialize.default_encode_steps] - ) - - class KvListOptions(TypedDict, total=False): """Keyword arguments of `Kv.list()`.""" @@ -418,7 +397,7 @@ def __init__( self.session = session self.metadata_cache = DatabaseMetadataCache(authenticator=auth) self.retry_delays = ExponentialBackoff() if retry is None else retry - self.v8_encoder = v8_encoder or create_default_v8_encoder() + self.v8_encoder = v8_encoder or Encoder() self.v8_decoder = v8_decoder or Decoder() self.flags = KvFlags.IntAsNumber if flags is None else flags diff --git a/test/conftest.py b/test/conftest.py index 9cfde92..d929b08 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -1,7 +1,9 @@ from __future__ import annotations +import pytest from google.protobuf.message import Message from pytest import Config +from v8serialize import Encoder from denokv._pycompat.typing import Sequence from test import advance_time @@ -51,3 +53,8 @@ def pytest_assertrepr_compare( ) return comparison return None + + +@pytest.fixture(scope="session") +def v8_encoder() -> Encoder: + return Encoder() diff --git a/test/denokv_testing.py b/test/denokv_testing.py index ec2eed9..6d6b100 100644 --- a/test/denokv_testing.py +++ b/test/denokv_testing.py @@ -71,7 +71,6 @@ from denokv.errors import InvalidCursor from denokv.kv import AnyCursorFormat from denokv.kv import ListContext -from denokv.kv import create_default_v8_encoder from denokv.kv_keys import KvKey from denokv.result import Err from denokv.result import Ok @@ -134,11 +133,8 @@ def decode_js_number_as_float( return next(tag) -v8_bigint_decoder = v8serialize.Decoder( - decode_steps=[decode_js_number_as_float, *v8serialize.default_decode_steps] -) -"""Decodes JS Number as float and BigInt as int.""" -v8_bigint_encoder = create_default_v8_encoder() +default_v8_decoder = v8serialize.Decoder() +default_v8_encoder = v8serialize.Encoder() def assume_ok(result: Result[T, E]) -> T: @@ -571,7 
+567,7 @@ def decode_number_value( def decode_v8_number(data: bytes) -> int | float: try: - value = v8_bigint_decoder.decodes(data) + value = default_v8_decoder.decodes(data) except v8serialize.V8SerializeError as e: raise ValueError("data is not a valid V8-serialized value") from e if not isinstance(value, (int, float)): @@ -581,7 +577,7 @@ def decode_v8_number(data: bytes) -> int | float: def encode_number_value(value: int | float, encoding: ValueEncoding) -> bytes: if encoding == ValueEncoding.VE_V8: - return bytes(v8_bigint_encoder.encode(value)) + return bytes(default_v8_encoder.encode(value)) elif encoding == ValueEncoding.VE_LE64: if isinstance(value, float): raise TypeError("Cannot encode float as LE64") @@ -610,7 +606,7 @@ def encode_kv_write_value(value: object, expires_at_ms: int = 0) -> KvWriteValue def decode_enqueue_message(enqueue: Enqueue) -> MockKvDbMessage: try: - payload_value = v8_bigint_decoder.decodes(enqueue.payload) + payload_value = default_v8_decoder.decodes(enqueue.payload) except v8serialize.V8SerializeError as e: raise ValueError("Enqueue payload is not a valid V8-encoded value") from e keys_if_undelivered = list[KvKey]() @@ -788,9 +784,9 @@ def unsafe_parse_protobuf_kv_entry( raw: ProtobufKvEntry, v8_decoder: v8serialize.Decoder | None = None ) -> KvEntry: if v8_decoder is None: - v8_decoder = v8_bigint_decoder + v8_decoder = default_v8_decoder key, value, versionstamp = assume_ok( - parse_protobuf_kv_entry(raw, v8_decoder=v8_bigint_decoder, le64_type=KvU64) + parse_protobuf_kv_entry(raw, v8_decoder=v8_decoder, le64_type=KvU64) ) return KvEntry(KvKey.wrap_tuple_keys(key), value, VersionStamp(versionstamp)) diff --git a/test/test_datapath.py b/test/test_datapath.py index c4fc72d..b541d61 100644 --- a/test/test_datapath.py +++ b/test/test_datapath.py @@ -18,6 +18,7 @@ from hypothesis import given from hypothesis import strategies as st from v8serialize import Decoder +from v8serialize.jstypes import JSBigInt from yarl import URL from 
denokv import datapath @@ -75,12 +76,12 @@ from denokv.result import is_ok from test.denokv_testing import MockKvDb from test.denokv_testing import add_entries +from test.denokv_testing import default_v8_encoder from test.denokv_testing import make_database_metadata from test.denokv_testing import meta_endpoint from test.denokv_testing import mock_db_api from test.denokv_testing import nextafter from test.denokv_testing import unsafe_parse_protobuf_kv_entry -from test.denokv_testing import v8_bigint_encoder TestClient: TypeAlias = _TestClient[web.Request, web.Application] @@ -841,7 +842,7 @@ async def test_atomic_write__handles_unsuccessful_responses( @pytest.fixture def example_entries_write() -> Mapping[KvKeyTuple, object]: - return {("bigint", 1): 10} + return {("bigint", 1): JSBigInt(10)} # There's not really much point in testing many successful mutations here, as @@ -858,7 +859,7 @@ def example_entries_write() -> Mapping[KvKeyTuple, object]: Mutation( key=pack_key(("bigint", 1)), value=KvValue( - data=bytes(v8_bigint_encoder.encode(20)), + data=bytes(default_v8_encoder.encode(JSBigInt(20))), encoding=ValueEncoding.VE_V8, ), mutation_type=MutationType.M_SET, @@ -870,7 +871,7 @@ def example_entries_write() -> Mapping[KvKeyTuple, object]: start=pack_key(("bigint", 1)), end=pack_key(("bigint", 2)), limit=1 ), ], - [[(KvKey("bigint", 1), 20)]], + [[(KvKey("bigint", 1), JSBigInt(20))]], id="set", ), pytest.param( @@ -879,7 +880,7 @@ def example_entries_write() -> Mapping[KvKeyTuple, object]: Mutation( key=pack_key(("bigint", 1)), value=KvValue( - data=bytes(v8_bigint_encoder.encode(20)), + data=bytes(default_v8_encoder.encode(JSBigInt(20))), encoding=ValueEncoding.VE_V8, ), mutation_type=MutationType.M_SUM, @@ -891,7 +892,7 @@ def example_entries_write() -> Mapping[KvKeyTuple, object]: start=pack_key(("bigint", 1)), end=pack_key(("bigint", 2)), limit=1 ), ], - [[(KvKey("bigint", 1), 30)]], + [[(KvKey("bigint", 1), JSBigInt(30))]], id="sum", ), ], From 
6b417f595d0153bb7d8ef70720face8b170ca50d Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Tue, 31 Dec 2024 17:09:32 +0000 Subject: [PATCH 30/52] test: add typeval() util function --- test/denokv_testing.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/denokv_testing.py b/test/denokv_testing.py index 6d6b100..5449cd0 100644 --- a/test/denokv_testing.py +++ b/test/denokv_testing.py @@ -844,3 +844,7 @@ def nextafter(x: float, y: float, *, steps: int = 1) -> float: for _ in range(steps): x = math.nextafter(x, y) return x + + +def typeval(value: T) -> tuple[type[T], T]: + return type(value), value From db1fb353a98e924666ffc4b1dba149f36d069057 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Sun, 2 Feb 2025 15:30:27 +0000 Subject: [PATCH 31/52] feat: add @frozen class decorator It disables setattr and delattr, like @dataclass(frozen=True) --- src/denokv/_utils.py | 22 ++++++++++++++++++ test/test__utils.py | 54 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 76 insertions(+) create mode 100644 src/denokv/_utils.py create mode 100644 test/test__utils.py diff --git a/src/denokv/_utils.py b/src/denokv/_utils.py new file mode 100644 index 0000000..41cb365 --- /dev/null +++ b/src/denokv/_utils.py @@ -0,0 +1,22 @@ +from __future__ import annotations + +from dataclasses import FrozenInstanceError + +from denokv._pycompat.typing import TypeVar + +TypeT = TypeVar("TypeT", bound=type) + + +def frozen_setattr(cls: type, name: str, value: object) -> None: + raise FrozenInstanceError(f"Cannot assign to field {name!r}") + + +def frozen_delattr(cls: type, name: str) -> None: + raise FrozenInstanceError(f"Cannot delete field {name!r}") + + +def frozen(cls: TypeT) -> TypeT: + """Disable `__setattr__` and `__delattr__`, much like @dataclass(frozen=True).""" + cls.__setattr__ = frozen_setattr # type: ignore[method-assign,assignment] + cls.__delattr__ = frozen_delattr # type: ignore[method-assign,assignment] + return cls diff --git a/test/test__utils.py 
b/test/test__utils.py new file mode 100644 index 0000000..4ae0c79 --- /dev/null +++ b/test/test__utils.py @@ -0,0 +1,54 @@ +from dataclasses import FrozenInstanceError +from dataclasses import dataclass +from enum import Enum + +import pytest + +from denokv._pycompat.typing import Final +from denokv._utils import frozen + + +def test_frozen_decorator() -> None: + @dataclass + class Info: + label: Final[str] # type: ignore[misc] + size: Final[int] # type: ignore[misc] + + class Things(Info, Enum): + FOO = "foo", 42 + BAR = "bar", 100 + + # Enums prevent assigning to special enum fields + with pytest.raises(AttributeError): + Things.FOO.name = "XXX" # type: ignore[misc] + + # But custom fields are writable + assert Things.FOO.label == "foo" + assert Things.FOO.size == 42 + + Things.FOO.label = "lol" # type: ignore[misc] + del Things.FOO.size + + assert Things.FOO.label == "lol" + assert not hasattr(Things.FOO, "size") + + # By not when using @frozen + + @frozen + class FrozenThings(Info, Enum): + FOO = "foo", 42 + BAR = "bar", 100 + + with pytest.raises(FrozenInstanceError): + FrozenThings.FOO.name = "XXX" # type: ignore[misc] + + assert FrozenThings.FOO.label == "foo" + assert FrozenThings.FOO.size == 42 + + with pytest.raises(FrozenInstanceError): + FrozenThings.FOO.label = "lol" # type: ignore[misc] + with pytest.raises(FrozenInstanceError): + del FrozenThings.FOO.size + + assert FrozenThings.FOO.label == "foo" + assert FrozenThings.FOO.size == 42 From d0a848f6bbd5da2344b4f42260259e9de2e2fd58 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Sat, 4 Jan 2025 10:01:32 +0000 Subject: [PATCH 32/52] fix: annotate KvU64.RANGE mypy thought it was Any. 
--- src/denokv/_kv_values.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/denokv/_kv_values.py b/src/denokv/_kv_values.py index f93ecfa..138a5da 100644 --- a/src/denokv/_kv_values.py +++ b/src/denokv/_kv_values.py @@ -115,7 +115,7 @@ class KvU64: ValueError: value not in range for 64-bit unsigned int """ - RANGE: ClassVar = range(0, 2**64) + RANGE: ClassVar[range] = range(0, 2**64) value: int def __init__(self, value: bytes | int) -> None: From f9d21183d8284e199eaf3bbc3062e07a92abe8b6 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Sun, 5 Jan 2025 08:35:33 +0000 Subject: [PATCH 33/52] test: configure hypothesis profiles to run more examples --- test/conftest.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/test/conftest.py b/test/conftest.py index d929b08..9cecf7c 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -1,7 +1,11 @@ from __future__ import annotations +import os + import pytest from google.protobuf.message import Message +from hypothesis import Verbosity +from hypothesis import settings from pytest import Config from v8serialize import Encoder @@ -9,6 +13,11 @@ from test import advance_time from test.denokv_testing import diff_protobuf_messages +settings.register_profile("ci", max_examples=1000) +settings.register_profile("dev", max_examples=10) +settings.register_profile("debug", max_examples=10, verbosity=Verbosity.verbose) +settings.load_profile(os.getenv("HYPOTHESIS_PROFILE", "default").lower()) + advance_time_time = advance_time.advance_time_time _pytest_assertion_verbosity: int = 0 From 974443310f7f8ecd0ba10114fcd39623ec8a65a5 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Mon, 6 Jan 2025 07:09:36 +0000 Subject: [PATCH 34/52] feat: add Result.or_raise(), Result.value_or_raise() --- src/denokv/result.py | 54 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/src/denokv/result.py b/src/denokv/result.py index 68ba208..02e2eef 100644 --- a/src/denokv/result.py +++ 
b/src/denokv/result.py @@ -902,6 +902,23 @@ def or_else(self, fn: Callable[[], Result[T, U]]) -> Result[T_co | T, U]: >>> assert Err('error a').or_else(lambda: Err('error b')) == Err('error b') """ + def or_raise(self) -> Ok[T_co]: + """ + Return the Ok as-is, or raise the Err.error value if this is Err. + + Examples + -------- + >>> assert Ok(1).or_raise() == Ok(1) + + >>> Err(ValueError('bad')).or_raise() + Traceback (most recent call last): + ValueError: bad + + >>> Err('foo').or_raise() + Traceback (most recent call last): + Exception: foo + """ + def value_or(self, default: U) -> T_co | U: """ Return the Ok's value, or default if this is Err. @@ -922,6 +939,23 @@ def value_or_else(self, fn: Callable[[], U]) -> T_co | U: >>> assert Err('x').value_or_else(lambda: 2) == 2 """ + def value_or_raise(self) -> T_co: + """ + Return the Ok's value, or raise the Err.error value if this is Err. + + Examples + -------- + >>> assert Ok(1).value_or_raise() == 1 + + >>> Err(ValueError('bad')).value_or_raise() + Traceback (most recent call last): + ValueError: bad + + >>> Err('foo').value_or_raise() + Traceback (most recent call last): + Exception: foo + """ + def __iter__(self) -> Iterator[T_co]: """ Return an iterator containing the Ok's value or no values if this is Err. 
@@ -1111,6 +1145,10 @@ def or_(self, default: Result[T, U]) -> Result[T_co, U]: def or_else(self, fn: Callable[[], Result[T, U]]) -> Result[T_co, U]: return self + @doc_from(ResultMethods) + def or_raise(self) -> Ok[T_co]: + return self + @doc_from(ResultMethods) def value_or(self, default: U) -> T_co: return self.value @@ -1119,6 +1157,10 @@ def value_or(self, default: U) -> T_co: def value_or_else(self, fn: Callable[[], U]) -> T_co: return self.value + @doc_from(ResultMethods) + def value_or_raise(self) -> T_co: + return self.value + def __iter__(self) -> Iterator[T_co]: return iter((self.value,)) @@ -1202,6 +1244,12 @@ def or_(self, default: Result[T_co, U]) -> Result[T_co, U]: def or_else(self, fn: Callable[[], Result[T_co, U]]) -> Result[T_co, U]: return fn() + @doc_from(ResultMethods) + def or_raise(self) -> Never: + if isinstance(self.error, BaseException): + raise self.error + raise Exception(self.error) + if not TYPE_CHECKING: @property @@ -1216,6 +1264,12 @@ def value_or(self, x_default: U) -> U: def value_or_else(self, fn: Callable[[], U]) -> U: return fn() + @doc_from(ResultMethods) + def value_or_raise(self) -> Never: + if isinstance(self.error, BaseException): + raise self.error + raise Exception(self.error) + def __iter__(self) -> Iterator[Never]: return iter(()) From 307cb4bbfb3a1d5eecff1e5e477420b6c849710e Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Thu, 9 Jan 2025 09:01:09 +0000 Subject: [PATCH 35/52] test: improve number type handling in mock KV MockKvDb's atomic sum/min/max operations now allow int/float to be used for float sum operations. Previously sum operations had to use Python float values, because int values were used to distinguish bigint values. Now that v8serialize uses JSBigInt for bigint and decodes int-safe floats as int, it's necessary to allow int in float operations. 
We now segregate bigint/float/u64 number types according to the datapath protobuf encoding rather than Python type, which avoids clashes between bigint and float using Python int. --- test/denokv_testing.py | 172 +++++++++++++++++++++++------------------ 1 file changed, 97 insertions(+), 75 deletions(-) diff --git a/test/denokv_testing.py b/test/denokv_testing.py index 5449cd0..cbc74ff 100644 --- a/test/denokv_testing.py +++ b/test/denokv_testing.py @@ -26,6 +26,7 @@ from v8serialize.constants import SerializationTag from v8serialize.decode import DecodeContext from v8serialize.decode import DecodeNextFn +from v8serialize.jstypes import JSBigInt from yarl import URL from denokv._datapath_pb2 import AtomicWrite @@ -47,6 +48,7 @@ from denokv._kv_values import VersionStamp from denokv._kv_writes import LimitExceededPolicy from denokv._pycompat.dataclasses import slots_if310 +from denokv._pycompat.enum import EvalEnumRepr from denokv._pycompat.protobuf import enum_name from denokv._pycompat.typing import Any from denokv._pycompat.typing import Callable @@ -161,6 +163,38 @@ def assume_err(result: Result[T, E], type: type[E2] | None = None) -> E | E2: ) +@dataclass(repr=False) +class KvNumberEncoding: + py_name: str + js_name: str | None + value_encoding: ValueEncoding + + +class KvNumber(KvNumberEncoding, EvalEnumRepr): + """ + The number types supported by datapath Sum/Min/Max operations. 
+ + Examples + -------- + >>> f'Foo bar: {KvNumber.bigint}' + 'Foo bar: JSBigInt (VE_V8 BigInt)' + >>> f'Foo bar: {KvNumber.u64}' + 'Foo bar: KvU64 (VE_LE64)' + >>> repr(KvNumber.bigint) + 'KvNumber.bigint' + """ + + bigint = "JSBigInt", "BigInt", ValueEncoding.VE_V8 + float = "int/float", "Number", ValueEncoding.VE_V8 + u64 = "KvU64", None, ValueEncoding.VE_LE64 + + def __str__(self) -> str: + ve_name = enum_name(ValueEncoding, self.value_encoding) + if self.js_name is None: + return f"{self.py_name} ({ve_name})" + return f"{self.py_name} ({ve_name} {self.js_name})" + + @dataclass(**slots_if310(), frozen=True) class KvWriteValue: data: bytes @@ -354,6 +388,10 @@ def atomic_write( or mut.mutation_type == MutationType.M_MIN or mut.mutation_type == MutationType.M_MAX ): + # FIXME: Check this again, I don't think KvU64 actually allows + # bigint operands with the sqlite implementation. Does + # the FoundationDB impl act differently? + # # Deno KV allows sum(left, right) with certain combinations of # types: # @@ -384,29 +422,19 @@ def atomic_write( else: current_encoding, current_value = decode_number_value(current) - op = _get_number_operator(mut, current_encoding=current_encoding) + op = _get_number_operator(mut, operand_encoding=operand_encoding) if not _is_allowed_op_combination( op, (current_encoding, current_value), (operand_encoding, operand_value), ): - left_desc = "{} ({})".format( - None - if current_encoding is None - else enum_name(ValueEncoding, current_encoding), - type(current_value), - ) - right_desc = "{} ({})".format( - None - if operand_encoding is None - else enum_name(ValueEncoding, operand_encoding), - type(operand_value), - ) raise ValueError( f"Cannot apply operation " - f"{enum_name(MutationType, mut.mutation_type)}" - f"({left_desc}, {right_desc})" + f"{enum_name(MutationType, mut.mutation_type)}, " + f"number types are incompatible: " + f"current type: {current_encoding}, " + f"operand type: {operand_encoding}" ) try: @@ -421,7 +449,7 @@ def 
atomic_write( mutation_entries[key_bytes] = KvWriteValue( data=encode_number_value(result, result_encoding), - encoding=result_encoding, + encoding=result_encoding.value_encoding, expire_at_ms=expires_at_ms, ) elif mut.mutation_type == MutationType.M_SET_SUFFIX_VERSIONSTAMPED_KEY: @@ -444,57 +472,60 @@ def atomic_write( def _is_allowed_op_combination( - op: Callable[[float, float], float] | MutationSumOperator | None, - left: tuple[ValueEncoding | None, float], - right: tuple[ValueEncoding | None, float], -) -> TypeIs[Callable[[float, float], float] | MutationSumOperator]: + op: Callable[[int | float, int | float], int | float] | MutationSumOperator | None, + left: tuple[KvNumber | None, int | float], + right: tuple[KvNumber, int | float], +) -> TypeIs[Callable[[int | float, int | float], int | float] | MutationSumOperator]: left_encoding, left_value = left right_encoding, right_value = right + + def values_are(types: type | tuple[type, ...]) -> bool: + return isinstance(left_value, types) and isinstance(right_value, types) + if isinstance(op, MutationSumOperator): - if left_encoding == ValueEncoding.VE_LE64: - return right_encoding == ValueEncoding.VE_LE64 or ( - right_encoding == ValueEncoding.VE_V8 and isinstance(right_value, int) + if left_encoding is KvNumber.u64: + return ( + right_encoding is KvNumber.u64 + or (right_encoding is KvNumber.bigint) + and values_are(int) ) - elif left_encoding == ValueEncoding.VE_V8: - return type(left_value) is type(right_value) + elif left_encoding is KvNumber.bigint: + return right_encoding is KvNumber.bigint and values_are(JSBigInt) + elif left_encoding is KvNumber.float: + return right_encoding is KvNumber.float elif left_encoding is None: # Sum can be used with a missing left operand. 
- return right_encoding == ValueEncoding.VE_LE64 or ( - right_encoding == ValueEncoding.VE_V8 - and isinstance(right_value, (int, float)) - ) + return right_encoding is KvNumber.float or values_are(int) elif op is min or op is max: return ( - left_encoding == ValueEncoding.VE_LE64 - or left_encoding is None - and right_encoding == ValueEncoding.VE_LE64 + left_encoding in (KvNumber.u64, None) + and right_encoding is KvNumber.u64 + and values_are(int) ) raise AssertionError(f"Unexpected op combinations: {op=}, {left=}, {right=}") def _get_number_operator( - mut: Mutation, *, current_encoding: ValueEncoding | None + mut: Mutation, *, operand_encoding: KvNumber ) -> Callable[[float, float], float] | None: if mut.mutation_type == MutationType.M_SUM: - min_ = decode_v8_number(mut.sum_min) if mut.sum_min else None - max_ = decode_v8_number(mut.sum_max) if mut.sum_max else None if ( - min_ is not None or max_ is not None or mut.sum_clamp + mut.sum_min or mut.sum_max or mut.sum_clamp ) and mut.value.encoding != ValueEncoding.VE_V8: - raise ValueError("Mutation used sum_min/sum_min with non-V8 encoding") - if min_ is not None and max_ is not None and type(min_) is not type(max_): raise ValueError( - "Mutation used different number types for sum_min and sum_max" + "Mutation used sum_min/sum_max/sum_clamp with non-V8 encoding" ) - - if ( - current_encoding == ValueEncoding.VE_LE64 - or mut.value.encoding == ValueEncoding.VE_LE64 - ): - if mut.sum_min or mut.sum_max or mut.sum_clamp: - raise ValueError("Mutation used custom sum limit with LE64 value") + if mut.value.encoding == ValueEncoding.VE_LE64: return MutationSumOperator(0, 2**64 - 1, LimitExceededPolicy.WRAP) + min_enc, min_ = decode_v8_number(mut.sum_min) if mut.sum_min else (None, None) + max_enc, max_ = decode_v8_number(mut.sum_max) if mut.sum_max else (None, None) + if (min_enc and min_enc is not operand_encoding) or ( + max_enc and max_enc is not operand_encoding + ): + raise ValueError( + "Mutation used different 
number types for value/sum_min/sum_max" + ) boundary = ( LimitExceededPolicy.CLAMP if mut.sum_clamp else LimitExceededPolicy.ERROR ) @@ -514,16 +545,7 @@ class MutationSumOperator: def __call__(self, left: int | float, right: int | float) -> int | float: min, max = self.min, self.max - if type(left) is not type(right): - raise TypeError(f"left and right must be the same type: {left=}, {right=}") - if (min is not None and type(min) is not type(left)) or ( - max is not None and type(max) is not type(left) - ): - raise TypeError( - "sum min/max value is a different number type than the operand values" - ) - if type(left) is not type(right): - raise TypeError(f"left and right must be the same type: {left=}, {right=}") + result = left + right if self.boundary is LimitExceededPolicy.WRAP: # wrap is only used for uint64 @@ -553,36 +575,36 @@ def __call__(self, left: int | float, right: int | float) -> int | float: def decode_number_value( entry: MockKvDbEntry | KvWriteValue | KvValue, -) -> tuple[ValueEncoding, int | float]: +) -> tuple[KvNumber, int | float]: if entry.encoding == ValueEncoding.VE_LE64: - return ValueEncoding.VE_LE64, KvU64(entry.data).value + return KvNumber.u64, KvU64(entry.data).value elif entry.encoding == ValueEncoding.VE_V8: - value = v8serialize.loads(entry.data) - if not isinstance(value, (int, float)): - raise ValueError("entry's value is not a V8-encoded BigInt or Number") - return ValueEncoding.VE_V8, value + return decode_v8_number(entry.data) else: raise ValueError("entry value is not an LE64 or V8-encoded BigInt or Number") -def decode_v8_number(data: bytes) -> int | float: - try: - value = default_v8_decoder.decodes(data) - except v8serialize.V8SerializeError as e: - raise ValueError("data is not a valid V8-serialized value") from e - if not isinstance(value, (int, float)): - raise ValueError("V8-serialized value is not a BigInt or Number") - return value +def decode_v8_number(data: bytes) -> tuple[KvNumber, int | float]: + value = 
default_v8_decoder.decodes(data) + if type(value) is JSBigInt: + return KvNumber.bigint, value + if type(value) in (int, float): + return KvNumber.float, cast(int | float, value) + raise ValueError("V8-serialized value is not a BigInt or Number") -def encode_number_value(value: int | float, encoding: ValueEncoding) -> bytes: - if encoding == ValueEncoding.VE_V8: - return bytes(default_v8_encoder.encode(value)) - elif encoding == ValueEncoding.VE_LE64: +def encode_number_value(value: int | float, encoding: KvNumber) -> bytes: + if encoding is KvNumber.float: + return bytes(default_v8_encoder.encode(float(value))) + elif encoding is KvNumber.bigint: + if isinstance(value, float): + raise TypeError("Cannot encode float as V8 BigInt") + return bytes(default_v8_encoder.encode(JSBigInt(value))) + else: + assert encoding is KvNumber.u64 if isinstance(value, float): raise TypeError("Cannot encode float as LE64") return KvU64(value).to_bytes() - raise ValueError(f"encoding is not LE64 or V8: {encoding}") def encode_kv_write_value(value: object, expires_at_ms: int = 0) -> KvWriteValue: From e15f97a882b2c4503c113e7532fc97df18e7079f Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Thu, 16 Jan 2025 03:04:52 +0000 Subject: [PATCH 36/52] test: allow AnyKvKey in denokv_testing.add_entries() The Mapping needs overloads as its params are invariant. 
--- test/denokv_testing.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/denokv_testing.py b/test/denokv_testing.py index cbc74ff..a1e390d 100644 --- a/test/denokv_testing.py +++ b/test/denokv_testing.py @@ -789,7 +789,9 @@ def meta_endpoint(meta: DatabaseMetadata) -> tuple[DatabaseMetadata, EndpointInf def add_entries( db: MockKvDb, - entries: Mapping[KvKeyTuple, object] | Iterable[tuple[KvKeyTuple, object]], + entries: Mapping[AnyKvKey, object] + | Mapping[KvKeyTuple, object] + | Iterable[tuple[AnyKvKey, object]], ) -> VersionStamp: if isinstance(entries, Mapping): entries = entries.items() From 1b510fdfa3fe5b71123cdfc60ffc5c9e6e42af50 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Wed, 15 Jan 2025 03:12:49 +0000 Subject: [PATCH 37/52] test: only include denokv module in coverage reports --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 1fc3476..b6a8ace 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -114,6 +114,7 @@ filterwarnings = [ ] [tool.coverage.run] +source = ["denokv"] omit = [ # generated Protocol Buffers module "*/denokv/_datapath_pb2.py", From 19624279ea4af25aa1d1ba941408e6ec4237e07d Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Mon, 3 Feb 2025 07:45:16 +0000 Subject: [PATCH 38/52] feat: rework PlannedWrite & Mutation APIs The PlannedWrite and Mutation types now have much better support and APIs for performing atomic number operations, like sum, min and max. The first main feature is that the 3 number types (bigint, u64, float) can be selected explicitly with a number_type parameter, or implicitly via the type of the number value, and both options are type-safe. The second is that we support min/max mutations on bigint and float types, not just u64 (KvU64), and we support sum on KvU64 with both wrapping and clamp limits. The underlying datapath protocol (and Deno's own KV API) do not support these. 
This makes the atomic number APIs more intuitive, as otherwise the user needs to know about the arbitrary restriction that only certain number types can use Min/Max/Sum. I don't love the massive amount of @overload annotations it takes to type-annotate these functions well, but I don't think there's a better alternative. The way that PlannedWrite interfaces with the main Kv class is now decoupled with a general interface, so Kv and PlannedWrite do not have explicit dependencies on each other. This makes it easier to test and modularise the codebase, and also allows alternative writer implementations to be created for special cases. --- src/denokv/_kv_types.py | 69 +- src/denokv/_kv_writes.py | 1943 ++++++++++++++++++---- src/denokv/kv.py | 105 +- test/denokv_testing.py | 14 +- test/test__kv_writes__Check.py | 36 + test/test__kv_writes__CommittedWrite.py | 67 + test/test__kv_writes__ConflictedWrite.py | 82 + test/test__kv_writes__Delete.py | 23 + test/test__kv_writes__Enqueue.py | 70 + test/test__kv_writes__KvNumber.py | 67 + test/test__kv_writes__Limit.py | 57 + test/test__kv_writes__Max.py | 111 ++ test/test__kv_writes__Min.py | 111 ++ test/test__kv_writes__PlannedWrite.py | 273 +++ test/test__kv_writes__Set.py | 77 + test/test__kv_writes__Sum.py | 393 +++++ test/test__kv_writes__U64KvNumberType.py | 202 +++ test/test_kv.py | 166 +- 18 files changed, 3454 insertions(+), 412 deletions(-) create mode 100644 test/test__kv_writes__Check.py create mode 100644 test/test__kv_writes__CommittedWrite.py create mode 100644 test/test__kv_writes__ConflictedWrite.py create mode 100644 test/test__kv_writes__Delete.py create mode 100644 test/test__kv_writes__Enqueue.py create mode 100644 test/test__kv_writes__KvNumber.py create mode 100644 test/test__kv_writes__Limit.py create mode 100644 test/test__kv_writes__Max.py create mode 100644 test/test__kv_writes__Min.py create mode 100644 test/test__kv_writes__PlannedWrite.py create mode 100644 test/test__kv_writes__Set.py create mode 
100644 test/test__kv_writes__Sum.py create mode 100644 test/test__kv_writes__U64KvNumberType.py diff --git a/src/denokv/_kv_types.py b/src/denokv/_kv_types.py index 2600e73..77ea8a9 100644 --- a/src/denokv/_kv_types.py +++ b/src/denokv/_kv_types.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from abc import ABC from abc import abstractmethod @@ -5,28 +7,81 @@ from v8serialize import Encoder from denokv._datapath_pb2 import AtomicWrite +from denokv._kv_values import VersionStamp from denokv._pycompat.typing import Generic from denokv._pycompat.typing import Protocol +from denokv._pycompat.typing import Sequence +from denokv._pycompat.typing import TypeAlias +from denokv._pycompat.typing import TypeGuard from denokv._pycompat.typing import TypeVar +from denokv._pycompat.typing import Union +from denokv.auth import EndpointInfo +from denokv.datapath import CheckFailure +from denokv.datapath import DataPathError +from denokv.result import Nothing +from denokv.result import Option +from denokv.result import Result +from denokv.result import Some WriteResultT = TypeVar("WriteResultT") +WriteResultT_co = TypeVar("WriteResultT_co", covariant=True) MessageT_co = TypeVar("MessageT_co", bound=Message, covariant=True) class ProtobufMessageRepresentation(Generic[MessageT_co], ABC): - """An object that can represent itself as a protobuf Message.""" + """An object that can represent itself as a protobuf Messages.""" __slots__ = () @abstractmethod - def as_protobuf(self, *, v8_encoder: Encoder) -> MessageT_co: ... + def as_protobuf(self, *, v8_encoder: Encoder) -> Sequence[MessageT_co]: ... + +class SingleProtobufMessageRepresentation(ProtobufMessageRepresentation[MessageT_co]): + """An object that can represent itself as a single protobuf Message.""" -class AtomicWriteRepresentation(ProtobufMessageRepresentation[AtomicWrite]): __slots__ = () + @abstractmethod + def as_protobuf(self, *, v8_encoder: Encoder) -> tuple[MessageT_co]: ... 
+ + +class AtomicWriteRepresentation(SingleProtobufMessageRepresentation[AtomicWrite]): + __slots__ = () + + +class AtomicWriteRepresentationWriter( + AtomicWriteRepresentation, Generic[WriteResultT_co] +): + __slots__ = () + + @abstractmethod + async def write(self, kv: KvWriter, *, v8_encoder: Encoder) -> WriteResultT_co: ... + + +KvWriterWriteResult: TypeAlias = Result[ + tuple[VersionStamp, EndpointInfo], Union[CheckFailure, DataPathError] +] + + +class KvWriter(ABC): + """A low-level interface for objects that can perform KV writes.""" + + @abstractmethod + async def write(self, *, protobuf_atomic_write: AtomicWrite) -> KvWriterWriteResult: + """Write a protobuf AtomicWrite message to the database.""" + + +class V8EncoderProvider(Protocol): + @property + def v8_encoder(self) -> Encoder: ... + + +def is_v8_encoder_provider(obj: object) -> TypeGuard[V8EncoderProvider]: + return isinstance(getattr(obj, "v8_encoder", None), Encoder) + -class KvWriter(Protocol): - async def write( - self, atomic_write: AtomicWriteRepresentation, / - ) -> WriteResultT: ... 
+def get_v8_encoder(maybe_v8_encoder_provider: object) -> Option[Encoder]: + if is_v8_encoder_provider(maybe_v8_encoder_provider): + return Some(maybe_v8_encoder_provider.v8_encoder) + return Nothing() diff --git a/src/denokv/_kv_writes.py b/src/denokv/_kv_writes.py index 0359839..89324fa 100644 --- a/src/denokv/_kv_writes.py +++ b/src/denokv/_kv_writes.py @@ -1,34 +1,41 @@ from __future__ import annotations -from abc import ABC from abc import abstractmethod +from builtins import float as float_ from dataclasses import dataclass from dataclasses import field from datetime import datetime from enum import Enum +from functools import total_ordering from itertools import islice from types import MappingProxyType from typing import Literal from typing import overload -import v8serialize from v8serialize import Encoder -from v8serialize.constants import SerializationTag -from v8serialize.decode import ReadableTagStream +from v8serialize.constants import FLOAT64_SAFE_INT_RANGE +from v8serialize.encode import WritableTagStream +from v8serialize.jstypes import JSBigInt from denokv import _datapath_pb2 as dp_protobuf from denokv._datapath_pb2 import AtomicWrite -from denokv._kv_types import AtomicWriteRepresentation +from denokv._kv_types import AtomicWriteRepresentationWriter from denokv._kv_types import KvWriter +from denokv._kv_types import ProtobufMessageRepresentation +from denokv._kv_types import SingleProtobufMessageRepresentation +from denokv._kv_types import get_v8_encoder from denokv._kv_values import KvEntry as KvEntry from denokv._kv_values import KvU64 as KvU64 from denokv._kv_values import VersionStamp as VersionStamp from denokv._pycompat.dataclasses import FrozenAfterInitDataclass from denokv._pycompat.dataclasses import slots_if310 from denokv._pycompat.enum import EvalEnumRepr -from denokv._pycompat.protobuf import enum_name +from denokv._pycompat.exceptions import with_notes from denokv._pycompat.typing import TYPE_CHECKING +from denokv._pycompat.typing 
import Any +from denokv._pycompat.typing import ClassVar from denokv._pycompat.typing import Container +from denokv._pycompat.typing import Generic from denokv._pycompat.typing import Mapping from denokv._pycompat.typing import MutableSequence from denokv._pycompat.typing import Never @@ -36,20 +43,728 @@ from denokv._pycompat.typing import Self from denokv._pycompat.typing import Sequence from denokv._pycompat.typing import TypeAlias +from denokv._pycompat.typing import TypedDict +from denokv._pycompat.typing import TypeGuard from denokv._pycompat.typing import TypeIs +from denokv._pycompat.typing import TypeVar from denokv._pycompat.typing import Union +from denokv._pycompat.typing import Unpack +from denokv._pycompat.typing import assert_never from denokv._pycompat.typing import cast +from denokv._pycompat.typing import override from denokv._pycompat.typing import runtime_checkable +from denokv._utils import frozen +from denokv.auth import EndpointInfo from denokv.backoff import Backoff from denokv.backoff import ExponentialBackoff from denokv.datapath import AnyKvKey +from denokv.datapath import CheckFailure from denokv.datapath import pack_key from denokv.kv_keys import KvKey from denokv.result import AnyFailure from denokv.result import AnySuccess +from denokv.result import is_err +KvNumberNameT = TypeVar("KvNumberNameT", bound=str, default=str) +NumberT = TypeVar("NumberT", bound=int | float, default=int | float) +KvNumberTypeT = TypeVar("KvNumberTypeT", default=object) -def encode_kv_write_value(value: object, *, v8_encoder: Encoder) -> dp_protobuf.KvValue: +KvNumberNameT_co = TypeVar("KvNumberNameT_co", bound=str, covariant=True, default=str) +NumberT_co = TypeVar( + "NumberT_co", bound=int | float, covariant=True, default=int | float +) +KvNumberTypeT_co = TypeVar("KvNumberTypeT_co", covariant=True, default=object) +U = TypeVar("U") + + +@total_ordering +@dataclass(frozen=True, unsafe_hash=True, **slots_if310()) +class 
KvNumberInfo(Generic[KvNumberNameT_co, NumberT, KvNumberTypeT]): + name: KvNumberNameT_co = field(init=False) + py_type: type[NumberT] = field(init=False) + kv_type: type[KvNumberTypeT] = field(init=False) + + @property + @abstractmethod + def default_limit(self) -> Limit[NumberT]: ... + + @abstractmethod + def validate_limit(self, limit: Limit[NumberT]) -> Limit[NumberT]: ... + + def __lt__(self, other: object) -> bool: + if isinstance(other, KvNumberInfo): + self_name: str = self.name # mypy needs help with inferring str + other_name: str = other.name + + if self_name == other_name and self != other: + raise RuntimeError("KvNumberInfo instances must have unique names") + return self_name < other_name + return NotImplemented + + def as_py_number(self, number: KvNumberTypeT | NumberT | int) -> NumberT: + if self.is_py_number(number): + return number + if self.is_kv_number(number) or self._is_compatible_int(number, target="py"): + return self.py_type(number) # type: ignore[arg-type,return-value] + raise self._describe_invalid_number(number, target="py") + + def as_kv_number(self, number: KvNumberTypeT | NumberT | int) -> KvNumberTypeT: + if self.is_kv_number(number): + return number + if self.is_py_number(number) or self._is_compatible_int(number, target="kv"): + return self.kv_type(number) # type: ignore[call-arg] + raise self._describe_invalid_number(number, target="kv") + + def _is_compatible_int( + self, number: object, *, target: Literal["py", "kv"] + ) -> TypeGuard[int]: + return type(number) is int + + def _describe_invalid_number( + self, number: object, *, target: Literal["py", "kv"] + ) -> Exception: + return with_notes( + TypeError( + f"number is not compatible with {self.name} {target} number type" + ), + f"number: {number!r} ({type(number)}), " f"{self.name}={self}", + ) + + def is_py_number(self, value: object) -> TypeGuard[NumberT]: + return isinstance(value, self.py_type) + + def is_kv_number(self, value: object) -> TypeGuard[KvNumberTypeT]: + return 
isinstance(value, self.kv_type) + + @abstractmethod + def get_sum_mutations( + self, + sum: Sum[KvNumberNameT_co, NumberT, KvNumberTypeT], + *, + v8_encoder: Encoder | None = None, + ) -> Sequence[dp_protobuf.Mutation]: ... + + @abstractmethod + def get_min_mutations( + self, + min: Min[KvNumberNameT_co, NumberT, KvNumberTypeT], + *, + v8_encoder: Encoder | None = None, + ) -> Sequence[dp_protobuf.Mutation]: ... + + @abstractmethod + def get_max_mutations( + self, + max: Max[KvNumberNameT_co, NumberT, KvNumberTypeT], + *, + v8_encoder: Encoder | None = None, + ) -> Sequence[dp_protobuf.Mutation]: ... + + +class V8KvNumberInfo(KvNumberInfo[KvNumberNameT_co, NumberT, KvNumberTypeT]): + @property + def default_limit(self) -> Limit[NumberT]: + return LIMIT_UNLIMITED + + def validate_limit(self, limit: Limit[NumberT]) -> Limit[NumberT]: + if limit.limit_exceeded not in ( + LimitExceededPolicy.ABORT, + LimitExceededPolicy.CLAMP, + ): + raise with_notes( + ValueError(f"Number type {self.name!r} does not support wrap limits"), + "Use 'u64' (KvU64) to wrap on 0, 2^64 - 1 bounds.", + ) + return limit + + @abstractmethod + def v8_encode_kv_number(self, value: KvNumberTypeT) -> bytes: ... 
+ + @override + def get_sum_mutations( + self, + sum: Sum[KvNumberNameT_co, NumberT, KvNumberTypeT], + *, + v8_encoder: Encoder | None = None, + ) -> Sequence[dp_protobuf.Mutation]: + encoded_min = b"" + encoded_max = b"" + self.validate_limit(sum.limit) + if sum.limit.min is not None: + encoded_min = self.v8_encode_kv_number(self.as_kv_number(sum.limit.min)) + if sum.limit.max is not None: + encoded_max = self.v8_encode_kv_number(self.as_kv_number(sum.limit.max)) + + mutation = dp_protobuf.Mutation( + mutation_type=dp_protobuf.MutationType.M_SUM, + key=pack_key(sum.key), + value=dp_protobuf.KvValue( + data=self.v8_encode_kv_number(self.as_kv_number(sum.delta)), + encoding=dp_protobuf.ValueEncoding.VE_V8, + ), + expire_at_ms=sum.expire_at_ms(), + sum_min=encoded_min, + sum_max=encoded_max, + sum_clamp=sum.limit.limit_exceeded is LimitExceededPolicy.CLAMP, + ) + + return [mutation] + + @override + def get_min_mutations( + self, + min: Min[KvNumberNameT_co, NumberT, KvNumberTypeT], + *, + v8_encoder: Encoder | None = None, + ) -> Sequence[dp_protobuf.Mutation]: + mutation = dp_protobuf.Mutation( + mutation_type=dp_protobuf.MutationType.M_SUM, + key=pack_key(min.key), + value=dp_protobuf.KvValue( + data=self.v8_encode_kv_number(self.as_kv_number(self.as_py_number(0))), + encoding=dp_protobuf.ValueEncoding.VE_V8, + ), + sum_max=self.v8_encode_kv_number(self.as_kv_number(min.value)), + sum_clamp=True, + expire_at_ms=min.expire_at_ms(), + ) + return [mutation] + + @override + def get_max_mutations( + self, + max: Max[KvNumberNameT_co, NumberT, KvNumberTypeT], + *, + v8_encoder: Encoder | None = None, + ) -> Sequence[dp_protobuf.Mutation]: + mutation = dp_protobuf.Mutation( + mutation_type=dp_protobuf.MutationType.M_SUM, + key=pack_key(max.key), + value=dp_protobuf.KvValue( + data=self.v8_encode_kv_number(self.as_kv_number(self.as_py_number(0))), + encoding=dp_protobuf.ValueEncoding.VE_V8, + ), + sum_min=self.v8_encode_kv_number(self.as_kv_number(max.value)), + 
sum_clamp=True, + expire_at_ms=max.expire_at_ms(), + ) + return [mutation] + + +class BigIntKvNumberInfo(V8KvNumberInfo[Literal["bigint"], int, JSBigInt]): + __slots__ = () + name = "bigint" + py_type = int + kv_type = JSBigInt + + def v8_encode_kv_number(self, value: JSBigInt) -> bytes: + return encode_v8_bigint(value) + + @override + def is_py_number(self, value: object) -> TypeGuard[int]: + # Don't treat JSBigInt instances as being py numbers so that we downcast + # JSBigInt to plain int in as_py_number(). This is important to allow + # other things to not treat JSBigInt as the same as int. + return (not self.is_kv_number(value)) and super().is_py_number(value) + + +class FloatKvNumberInfo(V8KvNumberInfo[Literal["float"], float, float]): + __slots__ = () + name = "float" + py_type = float + kv_type = float + + def v8_encode_kv_number(self, value: float) -> bytes: + return encode_v8_number(value) + + def _is_int_in_float_safe_range(self, value: object) -> TypeGuard[int]: + # int is assignable to float in Python's type system, but + # isinstance(int(x), float) is False. We don't allow subclasses of int, + # because JSBigInt is a subclass of int, and we don't want to treat them + # as FloatKvNumberInfo values. + return type(value) is int and value in FLOAT64_SAFE_INT_RANGE + + @override + def is_kv_number(self, value: object) -> TypeGuard[float]: + return self._is_int_in_float_safe_range(value) or super().is_kv_number(value) + + @override + def is_py_number(self, value: object) -> TypeGuard[float]: + return self._is_int_in_float_safe_range(value) or super().is_py_number(value) + + @override + def _is_compatible_int( + self, number: object, *, target: Literal["py", "kv"] + ) -> TypeGuard[int]: + # only allow conversions from plain int that is in the safe range. 
+ return self._is_int_in_float_safe_range(number) + + @override + def _describe_invalid_number( + self, number: object, *, target: Literal["py", "kv"] + ) -> Exception: + err = super()._describe_invalid_number(number, target=target) + if type(number) is int and not self._is_int_in_float_safe_range(number): + return with_notes( + ValueError(*err.args), + "The int is too large to represent as a 64-bit floating point value.", + from_exception=err, + ) + return err + + +class U64KvNumberInfo(KvNumberInfo[Literal["u64"], int, KvU64]): + __slots__ = () + name = "u64" + py_type = int + kv_type = KvU64 + + @property + def default_limit(self) -> Limit[int]: + return LIMIT_KVU64 + + @override + def validate_limit(self, limit: Limit[int]) -> Limit[int]: + if limit.limit_exceeded is LimitExceededPolicy.ABORT: + raise with_notes( + ValueError(f"Number type {self.name!r} does not support abort limits"), + "Use 'bigint' (JSBigInt) or 'float' (int/float) to wrap on " + "0, 2^64 - 1 bounds.", + ) + + if limit.limit_exceeded is LimitExceededPolicy.WRAP and limit != LIMIT_KVU64: + raise with_notes( + ValueError( + f"Number type {self.name!r} wrap limit's min, max " + f"bounds cannot be changed" + ), + "'u64' (KvU64) can only wrap at 0 and 2^64 - 1. 
It can use " + "clamp with custom bounds through.", + ) + return limit + + @override + def get_sum_mutations( + self, + sum: Sum[Literal["u64"], int, KvU64], + *, + v8_encoder: Encoder | None = None, + ) -> Sequence[dp_protobuf.Mutation]: + self.validate_limit(sum.limit) + assert sum.limit.limit_exceeded is not LimitExceededPolicy.ABORT + if sum.limit.limit_exceeded is LimitExceededPolicy.WRAP: + return self._get_sum_wrap_mutations(sum) + elif sum.limit.limit_exceeded is LimitExceededPolicy.CLAMP: + return self._get_sum_clamp_mutations(sum) + else: + assert_never(sum.limit.limit_exceeded) + + def _get_sum_clamp_mutations( + self, sum: Sum[Literal["u64"], int, KvU64] + ) -> Sequence[dp_protobuf.Mutation]: + assert sum.limit.limit_exceeded is LimitExceededPolicy.CLAMP + + limit_min = 0 if sum.limit.min is None else sum.limit.min + limit_max = KvU64.RANGE.stop - 1 if sum.limit.max is None else sum.limit.max + if limit_min not in KvU64.RANGE: + raise with_notes( + ValueError("sum.limit.min must be in KvU64.RANGE"), + f"sum.limit.min: {limit_min}", + ) + if limit_max not in KvU64.RANGE: + raise with_notes( + ValueError("sum.limit.max must be in KvU64.RANGE"), + f"sum.limit.max: {limit_max}", + ) + + delta = self._normalise_clamp_delta(sum.delta) + + if delta < 0: + return self._get_negative_sum_clamp_mutations( + sum, delta, limit_min, limit_max + ) + else: + return self._get_positive_sum_clamp_mutations( + sum, delta, limit_min, limit_max + ) + + def _get_positive_sum_clamp_mutations( + self, + sum: Sum[Literal["u64"], int, KvU64], + delta: int, + limit_min: int, + limit_max: int, + ) -> Sequence[dp_protobuf.Mutation]: + assert delta in KvU64.RANGE + assert limit_min in KvU64.RANGE + assert limit_max in KvU64.RANGE + + # When the upper limit is <= the delta, the result is always clamped at the + # upper limit. Likewise if the lower limit pushes the result above the upper + # limit, the upper limit is used (it's applied last). 
+ min_result = delta + if limit_max <= min_result or limit_max <= (limit_min or 0): + return [self._mutate_set(sum, KvU64(limit_max))] # result is constant + + mutations = list[dp_protobuf.Mutation]() + + if limit_min >= limit_max or limit_min <= delta: + limit_min = 0 # lower bound can have no effect on the result + + # We clamp the final result to be <= the limit_max by clamping the db + # value to the highest value that won't exceed the limit_max when the + # delta is added. + + # delta is always < limit_max, otherwise the result is constant, which + # is handled above. + max_start = limit_max - delta + assert max_start > 0 + mutations.append(self._mutate_min(sum, KvU64(max_start))) + + if delta != 0: + mutations.append(self._mutate_sum(sum, KvU64(delta))) + + if limit_min > 0: + mutations.append(self._mutate_max(sum, KvU64(limit_min))) + + return mutations + + def _get_negative_sum_clamp_mutations( + self, + sum: Sum[Literal["u64"], int, KvU64], + delta: int, + limit_min: int, + limit_max: int, + ) -> Sequence[dp_protobuf.Mutation]: + assert -delta in KvU64.RANGE + assert limit_min in KvU64.RANGE + assert limit_max in KvU64.RANGE + + # If value after adding the (negative) delta is always <= the lower + # limit, the lower limit is always the result. However the upper limit + # applies last, so if the upper limit is lower than the lower limit, it + # applies instead. 
+ if limit_max <= limit_min: + return [self._mutate_set(sum, KvU64(limit_max))] + max_result = (KvU64.RANGE.stop - 1) + delta + if limit_min >= max_result: + assert limit_max > limit_min + return [self._mutate_set(sum, KvU64(limit_min))] + + mutations = list[dp_protobuf.Mutation]() + + # Offset the start to prevent it going negative after adding the delta + min_start = abs(delta) + limit_min + # min_start cannot exceed the range, because abs(delta) values >= the + # difference between limit_min and the top of the range trigger the + # constant result short-circuit above, as the result is always limit_min + assert min_start in KvU64.RANGE + mutations.append(self._mutate_max(sum, KvU64(min_start))) + + # Make the negative delta a positive delta that overflows to the result + # of applying the original negative delta offset. + if delta != 0: + delta = KvU64.RANGE.stop + delta + assert delta in KvU64.RANGE + + # Apply the delta (effectively subtracting) + mutations.append(self._mutate_sum(sum, KvU64(delta))) + + if limit_max >= max_result: + # limit_max can have no effect on the result + assert limit_max > limit_min + else: + mutations.append(self._mutate_min(sum, KvU64(limit_max))) + + return mutations + + def _get_sum_wrap_mutations( + self, sum: Sum[Literal["u64"], int, KvU64] + ) -> Sequence[dp_protobuf.Mutation]: + assert sum.limit.limit_exceeded is LimitExceededPolicy.WRAP + # Only one wrapping limit is available for KvU64 + # (the default 64-bit uint bounds). + if sum.limit != LIMIT_KVU64: + raise with_notes( + ValueError( + f"Deno KV does not support {LimitExceededPolicy.WRAP} with " + f"non-default min/max for KvU64 values" + ), + f"sum.limit: {sum.limit}", + ) + + delta = self._normalise_wrap_delta(sum.delta) + # M_SUM mutations for KvU64 only support positive delta values, because + # KvU64 is unsigned. 
We support negative effective deltas by taking + # advantage of integer overflow/wrapping — we add a positive value that + # overflows to the equivalent of subtracting delta. + # + # For example to subtract 2 from 10, we are calculating + # (10 + (2**64 - 2)) % 2**64 = 8 + if delta < 0: + delta = KvU64.RANGE.stop + delta + assert delta in KvU64.RANGE + + return [self._mutate_sum(sum, KvU64(delta))] + + @override + def get_min_mutations( + self, + min: Min[Literal["u64"], int, KvU64], + *, + v8_encoder: Encoder | None = None, + ) -> Sequence[dp_protobuf.Mutation]: + mutation = dp_protobuf.Mutation( + mutation_type=dp_protobuf.MutationType.M_MIN, + key=pack_key(min.key), + value=dp_protobuf.KvValue( + data=bytes(KvU64(min.value)), + encoding=dp_protobuf.ValueEncoding.VE_LE64, + ), + expire_at_ms=min.expire_at_ms(), + ) + return [mutation] + + @override + def get_max_mutations( + self, + max: Max[Literal["u64"], int, KvU64], + *, + v8_encoder: Encoder | None = None, + ) -> Sequence[dp_protobuf.Mutation]: + mutation = dp_protobuf.Mutation( + mutation_type=dp_protobuf.MutationType.M_MAX, + key=pack_key(max.key), + value=dp_protobuf.KvValue( + data=bytes(KvU64(max.value)), + encoding=dp_protobuf.ValueEncoding.VE_LE64, + ), + expire_at_ms=max.expire_at_ms(), + ) + return [mutation] + + @staticmethod + def _normalise_wrap_delta(delta: int) -> int: + """ + Normalise a sum delta value to be within +/- 2**64 for limit type wrap. + + This method wraps delta values larger than 2**64 - 1, in contrast with + _normalise_clamp_delta(), which clamps at the max value. 
+ + Examples + -------- + >>> U64KvNumberInfo._normalise_wrap_delta(-5) + -5 + >>> U64KvNumberInfo._normalise_wrap_delta(-5 - 2**64) + -5 + >>> U64KvNumberInfo._normalise_wrap_delta(5) + 5 + >>> U64KvNumberInfo._normalise_wrap_delta(5 + 2**64) + 5 + >>> U64KvNumberInfo._normalise_wrap_delta(2**64) + 0 + >>> U64KvNumberInfo._normalise_wrap_delta(-2**64) + 0 + """ + pos_wrapped_delta = abs(delta) % KvU64.RANGE.stop + return -pos_wrapped_delta if delta < 0 else pos_wrapped_delta + + @staticmethod + def _normalise_clamp_delta(delta: int) -> int: + """ + Normalise a sum delta value to be within +/- 2**64 for limit type clamp. + + This method clamps delta values larger than 2**64 - 1 at the max value, + in contrast with _normalise_wrap_delta(), which wraps over the max value. + + Examples + -------- + >>> U64KvNumberInfo._normalise_clamp_delta(-5) + -5 + >>> U64KvNumberInfo._normalise_clamp_delta(-5 - 2**64) + -18446744073709551615 + >>> U64KvNumberInfo._normalise_clamp_delta(5) + 5 + >>> U64KvNumberInfo._normalise_clamp_delta(5 + 2**64) + 18446744073709551615 + >>> U64KvNumberInfo._normalise_clamp_delta(2**64) + 18446744073709551615 + >>> U64KvNumberInfo._normalise_clamp_delta(-2**64) + -18446744073709551615 + """ + pos_clamped_delta = min(abs(delta), KvU64.RANGE.stop - 1) + return -pos_clamped_delta if delta < 0 else pos_clamped_delta + + def _mutate( + self, + sum: Sum[Literal["u64"], int, KvU64], + mutation_type: dp_protobuf.MutationType, + value: KvU64, + ) -> dp_protobuf.Mutation: + return dp_protobuf.Mutation( + key=pack_key(sum.key), + expire_at_ms=sum.expire_at_ms(), + mutation_type=mutation_type, + value=dp_protobuf.KvValue(data=bytes(value), encoding=dp_protobuf.VE_LE64), + ) + + def _mutate_set( + self, sum: Sum[Literal["u64"], int, KvU64], value: KvU64 + ) -> dp_protobuf.Mutation: + return self._mutate(sum, dp_protobuf.MutationType.M_SET, value) + + def _mutate_max( + self, sum: Sum[Literal["u64"], int, KvU64], value: KvU64 + ) -> dp_protobuf.Mutation: + 
return self._mutate(sum, dp_protobuf.MutationType.M_MAX, value) + + def _mutate_min( + self, sum: Sum[Literal["u64"], int, KvU64], value: KvU64 + ) -> dp_protobuf.Mutation: + return self._mutate(sum, dp_protobuf.MutationType.M_MIN, value) + + def _mutate_sum( + self, sum: Sum[Literal["u64"], int, KvU64], value: KvU64 + ) -> dp_protobuf.Mutation: + return self._mutate(sum, dp_protobuf.MutationType.M_SUM, value) + + +@frozen +@total_ordering +class KvNumber(Enum): + """The types of numbers that the atomic sum/min/max operations can be used with.""" + + # _value_: KvNumberInfo + + bigint = BigIntKvNumberInfo() + """A JavaScript bigint — arbitrary-precision integer.""" + float = FloatKvNumberInfo() + """A JavaScript number — 64-bit floating-point number.""" + u64 = U64KvNumberInfo() + """A Deno KV-specific 64-bit unsigned integer.""" + + @overload + @classmethod + def resolve( + cls, identifier: BigIntKvNumberIdentifier + ) -> Literal[KvNumber.bigint]: ... + + @overload + @classmethod + def resolve( + cls, identifier: FloatKvNumberIdentifier + ) -> Literal[KvNumber.float]: ... + + @overload + @classmethod + def resolve(cls, identifier: U64KvNumberIdentifier) -> Literal[KvNumber.u64]: ... + + @overload + @classmethod + def resolve(cls, identifier: KvNumber) -> LiteralKvNumber: ... + + @overload + @classmethod + def resolve(cls, /, *, number: KvU64) -> Literal[KvNumber.u64]: ... + + @overload + @classmethod + def resolve(cls, /, *, number: JSBigInt) -> Literal[KvNumber.bigint]: ... # pyright: ignore[reportOverlappingOverload] + + @overload + @classmethod + def resolve(cls, /, *, number: float_) -> Literal[KvNumber.float]: ... 
+ + @classmethod + def resolve( + cls, + identifier: KvNumberIdentifier | None = None, + *, + number: KvU64 | JSBigInt | __builtins__.float | None = None, + ) -> LiteralKvNumber: + if identifier is not None: + return cast(LiteralKvNumber, KvNumber(identifier)) + + if number is None: + raise TypeError("resolve() missing 1 required argument: 'identifier'") + try: + return cast(LiteralKvNumber, KvNumber(type(number))) + except Exception as e: + raise TypeError( + f"number is not supported by any KvNumber: {number!r}" + ) from e + + @classmethod + def _missing_(cls, value: Any) -> KvNumber | None: + return cls.__members__.get(value) + + def __lt__(self, other: object) -> bool: + if type(other) is KvNumber: + self_value: KvNumberInfo[Any, Any, Any] = self.value + other_value: KvNumberInfo[Any, Any, Any] = other.value + return self_value < other_value + return NotImplemented + + +LiteralKvNumber: TypeAlias = Literal[KvNumber.bigint, KvNumber.float, KvNumber.u64] +KvNumber._value2member_map_[JSBigInt] = KvNumber.bigint +KvNumber._value2member_map_[float] = KvNumber.float +# int values correspond to KvNumber (float64) because JavaScript integer values +# are float64, and v8serialize by default encodes and decodes int values as +# Number not Bigint (JSBigInt is used for BigInt). 
+KvNumber._value2member_map_[int] = KvNumber.float +KvNumber._value2member_map_[KvU64] = KvNumber.u64 + +BigIntKvNumberIdentifier = Literal["bigint", KvNumber.bigint] | type[JSBigInt] +FloatKvNumberIdentifier = Literal["float", KvNumber.float] | type[float] +U64KvNumberIdentifier = Literal["u64", KvNumber.u64] | type[KvU64] +KvNumberIdentifier = ( + BigIntKvNumberIdentifier + | FloatKvNumberIdentifier + | U64KvNumberIdentifier + | KvNumber +) + + +def encode_v8_number(number: float, /) -> bytes: + """Encode a Python float as a JavaScript Number in V8 serialization format.""" + if not KvNumber.float.value.is_kv_number(number): + raise with_notes( + TypeError("number must be a float or int in the float-safe range"), + f"number: {number!r} ({type(number)})", + ) + wts = WritableTagStream() + wts.write_header() + # It's OK to pass an int, they'll be encoded as float64 + wts.write_double(number) + return bytes(wts.data) + + +def encode_v8_bigint(number: JSBigInt, /) -> bytes: + """Encode a Python JSBigInt as a JavaScript BigInt in V8 serialization format.""" + if not KvNumber.bigint.value.is_kv_number(number): + raise TypeError(f"number must be a JSBigInt, not {type(number)}") + wts = WritableTagStream() + wts.write_header() + wts.write_bigint(number) + return bytes(wts.data) + + +@overload +def encode_kv_write_value( + value: KvU64 | bytes | JSBigInt | float, *, v8_encoder: Encoder | None = None +) -> dp_protobuf.KvValue: ... + + +@overload +def encode_kv_write_value( + value: object, *, v8_encoder: Encoder +) -> dp_protobuf.KvValue: ... + + +def encode_kv_write_value( + value: object, *, v8_encoder: Encoder | None = None +) -> dp_protobuf.KvValue: if isinstance(value, KvU64): return dp_protobuf.KvValue( data=bytes(value), @@ -59,130 +774,478 @@ def encode_kv_write_value(value: object, *, v8_encoder: Encoder) -> dp_protobuf. 
return dp_protobuf.KvValue( data=value, encoding=dp_protobuf.ValueEncoding.VE_BYTES ) + elif isinstance(value, JSBigInt): + return dp_protobuf.KvValue( + data=encode_v8_bigint(value), encoding=dp_protobuf.ValueEncoding.VE_V8 + ) + elif isinstance(value, float): + return dp_protobuf.KvValue( + data=encode_v8_number(value), encoding=dp_protobuf.ValueEncoding.VE_V8 + ) else: + if v8_encoder is None: + raise TypeError( + "v8_encoder cannot be None when encoding an arbitrary object" + ) return dp_protobuf.KvValue( data=bytes(v8_encoder.encode(value)), encoding=dp_protobuf.ValueEncoding.VE_V8, ) +class MutationOptions(TypedDict, total=False): + expire_at: datetime | None + + +class LimitOptions(Generic[NumberT], TypedDict, total=False): + clamp_over: NumberT | None + clamp_under: NumberT | None + abort_over: NumberT | None + abort_under: NumberT | None + limit: Limit[NumberT] | None + + +class SumOptions(LimitOptions[NumberT_co], MutationOptions): + """Keyword arguments accepted by `sum()`/`Sum()`.""" + + +class SumArgs( + SumOptions[NumberT], Generic[KvNumberNameT, NumberT, KvNumberTypeT], total=False +): + """All arguments accepted by `sum()`/`Sum()`.""" + + key: AnyKvKey + delta: JSBigInt | float | KvU64 | NumberT | KvNumberTypeT + number_type: ( + KvNumberInfo[KvNumberNameT, NumberT, KvNumberTypeT] | KvNumberIdentifier | None + ) + + @dataclass -class PlannedWrite(AtomicWriteRepresentation): +class PlannedWrite(AtomicWriteRepresentationWriter["CompletedWrite"]): kv: KvWriter | None = field(default=None) - checks: MutableSequence[AnyKeyVersion] = field(default_factory=list) - mutations: MutableSequence[Mutation] = field(default_factory=list) - enqueues: MutableSequence[Enqueue] = field(default_factory=list) - - async def write(self, kv: KvWriter | None = None) -> CompletedWrite: - kv = self.kv if kv is None else kv - if kv is None: - raise TypeError("No kv was provided to write") - return await kv.write(self) - - def as_protobuf(self, *, v8_encoder: Encoder) -> 
AtomicWrite: - return AtomicWrite( - checks=[ - dp_protobuf.Check( - key=pack_key(check.key), versionstamp=check.versionstamp + checks: MutableSequence[CheckRepresentation] = field(default_factory=list) + mutations: MutableSequence[MutationRepresentation] = field(default_factory=list) + enqueues: MutableSequence[EnqueueRepresentation] = field(default_factory=list) + v8_encoder: Encoder | None = field(default=None, kw_only=True) + + @override + async def write( + self, kv: KvWriter | None = None, *, v8_encoder: Encoder | None = None + ) -> CompletedWrite: + _kv = self.kv if kv is None else kv + if _kv is None: + raise TypeError( + f"{type(self).__name__}.write() must get a value for its 'kv' " + "argument when 'self.kv' isn't set" + ) + + _v8_encoder = self.v8_encoder if v8_encoder is None else v8_encoder + if _v8_encoder is None: + _v8_encoder = get_v8_encoder(_kv).value_or(None) + if _v8_encoder is None: + raise TypeError( + f"{type(self).__name__}.write() must get a value for its " + "'v8_encoder' keyword argument when 'self.v8_encoder' isn't " + "set and 'kv' does not provide one." + ) + + (pb_atomic_write,) = self.as_protobuf(v8_encoder=_v8_encoder) + # Copy the write components so that the results are not affected if the + # PlannedWrite is modified during this write. 
+ checks = tuple(self.checks) + mutations = tuple(self.mutations) + enqueues = tuple(self.enqueues) + result = await _kv.write(protobuf_atomic_write=pb_atomic_write) + + if is_err(result): + if isinstance(result.error, CheckFailure): + check_failure = result.error + return ConflictedWrite( + failed_checks=list(check_failure.failed_check_indexes), + checks=checks, + mutations=mutations, + enqueues=enqueues, + endpoint=check_failure.endpoint, ) - for check in self.checks - ], - mutations=[ - mut.as_protobuf(v8_encoder=v8_encoder) for mut in self.mutations - ], - enqueues=[enq.as_protobuf(v8_encoder=v8_encoder) for enq in self.enqueues], + raise result.error + + versionstamp, endpoint = result.value + return CommittedWrite( + versionstamp=versionstamp, + checks=checks, + mutations=mutations, + enqueues=enqueues, + endpoint=endpoint, + ) + + def as_protobuf(self, *, v8_encoder: Encoder) -> tuple[AtomicWrite]: + return ( + AtomicWrite( + checks=[ + pb_msg + for check in self.checks + for pb_msg in check.as_protobuf(v8_encoder=v8_encoder) + ], + mutations=[ + pb_msg + for mut in self.mutations + for pb_msg in mut.as_protobuf(v8_encoder=v8_encoder) + ], + enqueues=[ + pb_msg + for enq in self.enqueues + for pb_msg in enq.as_protobuf(v8_encoder=v8_encoder) + ], + ), ) @overload def check(self, key: AnyKvKey, versionstamp: VersionStamp | None) -> Self: ... + @overload + def check(self, check: CheckRepresentation, /) -> Self: ... + @overload def check(self, check: AnyKeyVersion, /) -> Self: ... 
def check( - self, key: AnyKvKey | AnyKeyVersion, versionstamp: VersionStamp | None = None + self, + key: CheckRepresentation | AnyKeyVersion | AnyKvKey, + versionstamp: VersionStamp | None = None, ) -> Self: - if isinstance(key, AnyKeyVersion): + if isinstance(key, CheckRepresentation): + if versionstamp is not None: + raise TypeError( + "'versionstamp' argument cannot be set when the first argument " + "to check() is an object with an 'as_protobuf' method" + ) self.checks.append(key) + elif isinstance(key, AnyKeyVersion): if versionstamp is not None: raise TypeError( - "versionstamp argument cannot be passed when first argument " - "is check object with a key and versionstamp" + "'versionstamp' argument cannot be set when the first argument " + "to check() is an object with 'key' and 'versionstamp' attributes" ) + self.checks.append(Check(key.key, key.versionstamp)) else: self.checks.append(Check(key, versionstamp)) return self + def check_key_has_version(self, key: AnyKvKey, versionstamp: VersionStamp) -> Self: + self.checks.append(Check.for_key_with_version(key, versionstamp)) + return self + + def check_key_not_set(self, key: AnyKvKey) -> Self: + self.checks.append(Check.for_key_not_set(key)) + return self + def set(self, key: AnyKvKey, value: object, *, versioned: bool = False) -> Self: return self.mutate(Set(key, value, versioned=versioned)) + # The overloads here have two categories: Firstly overloads based on known + # Known KvNumber enum numbers — bigint, float and u64. Secondly, + # generic/catch-all for any KvNumberInfo instance. @overload - def sum(self, sum: Sum, /) -> Self: ... + def sum( + self, + key: AnyKvKey, + delta: JSBigInt, + number_type: None = None, + **options: Unpack[SumOptions[int]], + ) -> Self: ... @overload - def sum(self, key: AnyKvKey, value: KvU64) -> Self: ... + def sum( + self, + key: AnyKvKey, + delta: int | JSBigInt, + number_type: BigIntKvNumberIdentifier, + **options: Unpack[SumOptions[int]], + ) -> Self: ... 
@overload def sum( self, key: AnyKvKey, - value: int | float, - *, - limit_min: int | float | None = None, - limit_max: int | float | None = None, - limit_exceeded: LimitExceededInput | None = None, - limit: Limit | None = None, + delta: KvU64, + number_type: None = None, + **options: Unpack[SumOptions[int]], ) -> Self: ... + @overload def sum( self, - key: AnyKvKey | Sum, - value: int | float | KvU64 | None = None, - *, - limit_min: int | float | None = None, - limit_max: int | float | None = None, - limit_exceeded: LimitExceededInput | None = None, - limit: Limit | None = None, + key: AnyKvKey, + delta: int | KvU64, + number_type: U64KvNumberIdentifier, + **options: Unpack[SumOptions[int]], + ) -> Self: ... + + @overload + def sum( + self, + key: AnyKvKey, + delta: float, + number_type: FloatKvNumberIdentifier | None = None, + **options: Unpack[SumOptions[float]], + ) -> Self: ... + + @overload + def sum( + self, + key: AnyKvKey, + delta: NumberT | KvNumberTypeT, + number_type: KvNumberInfo[KvNumberNameT, NumberT, KvNumberTypeT], + # Can't use float limits unless the float type is explicitly being used, + # as float is incompatible with the other number types, but int is + # compatible. + **options: Unpack[SumOptions[NumberT]], + ) -> Self: ... 
+ + def sum( + self, + key: AnyKvKey, + delta: JSBigInt | float | KvU64 | NumberT | KvNumberTypeT, + number_type: KvNumberInfo[KvNumberNameT, NumberT, KvNumberTypeT] + | KvNumberIdentifier + | None = None, + **options: Unpack[SumOptions[NumberT]], ) -> Self: - if isinstance(key, Sum): - if value is not None: - raise TypeError("sum() takes no arguments after 'sum'") - return self.mutate(key) + delta = cast(NumberT | KvNumberTypeT, delta) + number_type = cast( + KvNumberInfo[KvNumberNameT, NumberT, KvNumberTypeT], number_type + ) + return self.mutate(Sum(key, delta, number_type, **options)) - if value is None: - raise TypeError("sum() missing 1 required positional argument: 'value'") - if limit is None: - if not (limit_min is None and limit_max is None and limit_exceeded is None): - limit = Limit( - min=limit_min, max=limit_max, limit_exceeded=limit_exceeded - ) - else: - limit_min = limit.min if limit_min is None else limit_min - limit_max = limit.max if limit_max is None else limit_max - limit_exceeded = ( - cast(LimitExceededInput, limit.limit_exceeded) - if limit_exceeded is None - else limit_exceeded - ) - limit = Limit(limit_min, limit_max, limit_exceeded) + def sum_bigint( + self, + key: AnyKvKey, + delta: int | JSBigInt, + **options: Unpack[SumOptions[int]], + ) -> Self: + return self.sum(key, delta, number_type=KvNumber.bigint, **options) - return self.mutate(Sum(key, value, limit=limit)) + def sum_float( + self, + key: AnyKvKey, + delta: float, + **options: Unpack[SumOptions[float]], + ) -> Self: + return self.sum(key, delta, number_type=KvNumber.float, **options) - def min(self, key: AnyKvKey, value: int | KvU64) -> Self: - return self.mutate(Min(key, value)) + def sum_kvu64( + self, + key: AnyKvKey, + delta: int | KvU64, + **options: Unpack[SumOptions[int]], + ) -> Self: + return self.sum(key, delta, number_type=KvNumber.u64, **options) - def max(self, key: AnyKvKey, value: int | KvU64) -> Self: - return self.mutate(Max(key, value)) + @overload + def min( 
+ self, + key: AnyKvKey, + value: JSBigInt, + number_type: None = None, + **options: Unpack[MutationOptions], + ) -> Self: ... + + @overload + def min( + self, + key: AnyKvKey, + value: int | JSBigInt, + number_type: BigIntKvNumberIdentifier, + **options: Unpack[MutationOptions], + ) -> Self: ... + + @overload + def min( + self, + key: AnyKvKey, + value: KvU64, + number_type: None = None, + **options: Unpack[MutationOptions], + ) -> Self: ... + + @overload + def min( + self, + key: AnyKvKey, + value: int | KvU64, + number_type: U64KvNumberIdentifier, + **options: Unpack[MutationOptions], + ) -> Self: ... + + @overload + def min( + self, + key: AnyKvKey, + value: float, + number_type: FloatKvNumberIdentifier | None = None, + **options: Unpack[MutationOptions], + ) -> Self: ... + + @overload + def min( + self, + key: AnyKvKey, + value: NumberT | KvNumberTypeT, + number_type: KvNumberInfo[KvNumberNameT, NumberT, KvNumberTypeT], + # Can't use float limits unless the float type is explicitly being used, + # as float is incompatible with the other number types, but int is + # compatible. + **options: Unpack[MutationOptions], + ) -> Self: ... 
+ + def min( + self, + key: AnyKvKey, + value: JSBigInt | float | KvU64 | NumberT | KvNumberTypeT, + number_type: KvNumberInfo[KvNumberNameT, NumberT, KvNumberTypeT] + | KvNumberIdentifier + | None = None, + **options: Unpack[MutationOptions], + ) -> Self: + value = cast(NumberT | KvNumberTypeT, value) + number_type = cast( + KvNumberInfo[KvNumberNameT, NumberT, KvNumberTypeT], number_type + ) + return self.mutate(Min(key, value, number_type, **options)) + + def min_bigint( + self, + key: AnyKvKey, + value: int | JSBigInt, + **options: Unpack[MutationOptions], + ) -> Self: + return self.min(key, value, number_type=KvNumber.bigint, **options) + + def min_float( + self, + key: AnyKvKey, + value: float, + **options: Unpack[MutationOptions], + ) -> Self: + return self.min(key, value, number_type=KvNumber.float, **options) + + def min_kvu64( + self, + key: AnyKvKey, + value: int | KvU64, + **options: Unpack[MutationOptions], + ) -> Self: + return self.min(key, value, number_type=KvNumber.u64, **options) + + @overload + def max( + self, + key: AnyKvKey, + value: JSBigInt, + number_type: None = None, + **options: Unpack[MutationOptions], + ) -> Self: ... + + @overload + def max( + self, + key: AnyKvKey, + value: int | JSBigInt, + number_type: BigIntKvNumberIdentifier, + **options: Unpack[MutationOptions], + ) -> Self: ... + + @overload + def max( + self, + key: AnyKvKey, + value: KvU64, + number_type: None = None, + **options: Unpack[MutationOptions], + ) -> Self: ... + + @overload + def max( + self, + key: AnyKvKey, + value: int | KvU64, + number_type: U64KvNumberIdentifier, + **options: Unpack[MutationOptions], + ) -> Self: ... + + @overload + def max( + self, + key: AnyKvKey, + value: float, + number_type: FloatKvNumberIdentifier | None = None, + **options: Unpack[MutationOptions], + ) -> Self: ... 
+ + @overload + def max( + self, + key: AnyKvKey, + value: NumberT | KvNumberTypeT, + number_type: KvNumberInfo[KvNumberNameT, NumberT, KvNumberTypeT], + # Can't use float limits unless the float type is explicitly being used, + # as float is incompatible with the other number types, but int is + # compatible. + **options: Unpack[MutationOptions], + ) -> Self: ... + + def max( + self, + key: AnyKvKey, + value: JSBigInt | float | KvU64 | NumberT | KvNumberTypeT, + number_type: KvNumberInfo[KvNumberNameT, NumberT, KvNumberTypeT] + | KvNumberIdentifier + | None = None, + **options: Unpack[MutationOptions], + ) -> Self: + value = cast(NumberT | KvNumberTypeT, value) + number_type = cast( + KvNumberInfo[KvNumberNameT, NumberT, KvNumberTypeT], number_type + ) + return self.mutate(Max(key, value, number_type, **options)) + + def max_bigint( + self, + key: AnyKvKey, + value: int | JSBigInt, + **options: Unpack[MutationOptions], + ) -> Self: + return self.max(key, value, number_type=KvNumber.bigint, **options) + + def max_float( + self, + key: AnyKvKey, + value: float, + **options: Unpack[MutationOptions], + ) -> Self: + return self.max(key, value, number_type=KvNumber.float, **options) + + def max_kvu64( + self, + key: AnyKvKey, + value: int | KvU64, + **options: Unpack[MutationOptions], + ) -> Self: + return self.max(key, value, number_type=KvNumber.u64, **options) def delete(self, key: AnyKvKey) -> Self: + if isinstance(key, Delete): + return self.mutate(key) return self.mutate(Delete(key)) - def mutate(self, mutation: Mutation) -> Self: + def mutate(self, mutation: MutationRepresentation) -> Self: self.mutations.append(mutation) return self @overload def enqueue(self, enqueue: Enqueue, /) -> Self: ... + @overload def enqueue( self, @@ -192,6 +1255,7 @@ def enqueue( retry_delays: Backoff | None = None, dead_letter_keys: Sequence[AnyKvKey] | None = None, ) -> Self: ... 
+ def enqueue( self, message: object | Enqueue, @@ -213,25 +1277,27 @@ def enqueue( return self -@dataclass(init=False, **slots_if310()) +@dataclass(init=False, unsafe_hash=True, **slots_if310()) class ConflictedWrite(FrozenAfterInitDataclass, AnyFailure): if TYPE_CHECKING: def _AnyFailure_marker(self, no_call: Never) -> Never: ... ok: Literal[False] - conflicts: Mapping[AnyKvKey, Check] + conflicts: Mapping[AnyKvKey, CheckRepresentation] versionstamp: None - checks: Sequence[Check] - mutations: Sequence[Mutation] - enqueues: Sequence[Enqueue] + checks: Sequence[CheckRepresentation] + mutations: Sequence[MutationRepresentation] + enqueues: Sequence[EnqueueRepresentation] + endpoint: EndpointInfo def __init__( self, failed_checks: Sequence[int], - checks: Sequence[Check], - mutations: Sequence[Mutation], - enqueues: Sequence[Enqueue], + checks: Sequence[CheckRepresentation], + mutations: Sequence[MutationRepresentation], + enqueues: Sequence[EnqueueRepresentation], + endpoint: EndpointInfo, ) -> None: self.ok = False try: @@ -244,27 +1310,40 @@ def __init__( self.checks = tuple(checks) self.mutations = tuple(mutations) self.enqueues = tuple(enqueues) + self.endpoint = endpoint + + def __repr__(self) -> str: + return ( + f"<{type(self).__name__} " + f"NOT APPLIED to {str(self.endpoint.url)!r} with " + f"{len(self.conflicts)}/{len(self.checks)} checks CONFLICTING, " + f"{len(self.mutations)} mutations, " + f"{len(self.enqueues)} enqueues" + f">" + ) -@dataclass(init=False, **slots_if310()) +@dataclass(init=False, unsafe_hash=True, **slots_if310()) class CommittedWrite(FrozenAfterInitDataclass, AnySuccess): if TYPE_CHECKING: def _AnySuccess_marker(self, no_call: Never) -> Never: ... 
ok: Literal[True] - conflicts: Mapping[KvKey, Check] # empty + conflicts: Mapping[KvKey, CheckRepresentation] # empty versionstamp: VersionStamp - checks: Sequence[Check] - mutations: Sequence[Mutation] - enqueues: Sequence[Enqueue] + checks: Sequence[CheckRepresentation] + mutations: Sequence[MutationRepresentation] + enqueues: Sequence[EnqueueRepresentation] + endpoint: EndpointInfo def __init__( self, versionstamp: VersionStamp, - checks: Sequence[Check], - mutations: Sequence[Mutation], - enqueues: Sequence[Enqueue], + checks: Sequence[CheckRepresentation], + mutations: Sequence[MutationRepresentation], + enqueues: Sequence[EnqueueRepresentation], + endpoint: EndpointInfo, ) -> None: self.ok = True self.conflicts = MappingProxyType({}) @@ -272,6 +1351,17 @@ def __init__( self.checks = tuple(checks) self.mutations = tuple(mutations) self.enqueues = tuple(enqueues) + self.endpoint = endpoint + + def __repr__(self) -> str: + return ( + f"<{type(self).__name__} " + f"version 0x{self.versionstamp} to {str(self.endpoint.url)!r} with " + f"{len(self.checks)} checks, " + f"{len(self.mutations)} mutations, " + f"{len(self.enqueues)} enqueues" + f">" + ) CompletedWrite: TypeAlias = Union[CommittedWrite, ConflictedWrite] @@ -296,28 +1386,79 @@ def versionstamp(self) -> VersionStamp | None: ... versionstamp = ... +class CheckRepresentation( + SingleProtobufMessageRepresentation[dp_protobuf.Check], AnyKeyVersion +): + __slots__ = () + + # Check never needs an Encoder, so override the signature to make it optional. + @override + @abstractmethod + def as_protobuf( + self, *, v8_encoder: Encoder | None = None + ) -> tuple[dp_protobuf.Check]: ... + + @dataclass(frozen=True, **slots_if310()) -class Check(AnyKeyVersion): +class Check(CheckRepresentation, AnyKeyVersion): + """ + A condition that must hold for a database write operation to be applied. 
+ + By applying checks to a write operation, writes can ensure that the changes + they make are changing the existing values they expect. Without appropriate + checks, write operations could overwrite another writer's changes to the + database. + + Checks are part of Deno KV's + [Multi-version concurrency control](https://en.wikipedia.org/wiki/Multiversion_concurrency_control) + support. + """ + key: AnyKvKey + """The key that the check applies to.""" versionstamp: VersionStamp | None + """ + The version that that the key's value must have for the check to succeed. + + `None` means the key must not have a value set for the check to succeed. + """ + + @classmethod + def for_key_with_version(cls, key: AnyKvKey, versionstamp: VersionStamp) -> Self: + return cls(key, versionstamp) + + @classmethod + def for_key_not_set(cls, key: AnyKvKey) -> Self: + return cls(key, versionstamp=None) + + @override + def as_protobuf( + self, *, v8_encoder: Encoder | None = None + ) -> tuple[dp_protobuf.Check]: + return ( + dp_protobuf.Check(key=pack_key(self.key), versionstamp=self.versionstamp), + ) + + +class MutationRepresentation(ProtobufMessageRepresentation[dp_protobuf.Mutation]): + __slots__ = () + + @abstractmethod + def as_protobuf(self, *, v8_encoder: Encoder) -> Sequence[dp_protobuf.Mutation]: ... 
@dataclass(init=False, **slots_if310()) -class Mutation(FrozenAfterInitDataclass, ABC): +class Mutation(FrozenAfterInitDataclass, MutationRepresentation): key: AnyKvKey expire_at: datetime | None - def __init__(self, key: AnyKvKey, expire_at: datetime | None) -> None: + def __init__(self, key: AnyKvKey, **options: Unpack[MutationOptions]) -> None: if type(self) is Mutation: raise TypeError("cannot create Mutation instances directly") self.key = key - self.expire_at = expire_at - - @abstractmethod - def as_protobuf(self, *, v8_encoder: Encoder) -> dp_protobuf.Mutation: - pass + self.expire_at = options.get("expire_at") - def _expire_at_ms(self) -> int: + def expire_at_ms(self) -> int: return 0 if self.expire_at is None else int(self.expire_at.timestamp() * 1000) @@ -338,35 +1479,38 @@ def __init__( self.value = value self.versioned = versioned - def as_protobuf(self, *, v8_encoder: Encoder) -> dp_protobuf.Mutation: - return dp_protobuf.Mutation( - mutation_type=dp_protobuf.MutationType.M_SET_SUFFIX_VERSIONSTAMPED_KEY - if self.versioned - else dp_protobuf.MutationType.M_SET, - key=pack_key(self.key), - value=encode_kv_write_value(self.value, v8_encoder=v8_encoder), - expire_at_ms=self._expire_at_ms(), + @override + def as_protobuf(self, *, v8_encoder: Encoder) -> tuple[dp_protobuf.Mutation]: + return ( + dp_protobuf.Mutation( + mutation_type=dp_protobuf.MutationType.M_SET_SUFFIX_VERSIONSTAMPED_KEY + if self.versioned + else dp_protobuf.MutationType.M_SET, + key=pack_key(self.key), + value=encode_kv_write_value(self.value, v8_encoder=v8_encoder), + expire_at_ms=self.expire_at_ms(), + ), ) class LimitExceededPolicy(EvalEnumRepr, Enum): - ERROR = "error" + ABORT = "abort" CLAMP = "clamp" WRAP = "wrap" LimitExceededInput = Literal[ - "error", + "abort", "clamp", - LimitExceededPolicy.ERROR, + LimitExceededPolicy.ABORT, LimitExceededPolicy.CLAMP, ] -@dataclass(init=False, frozen=True, **slots_if310()) -class Limit(Container["int | float"]): +@dataclass(frozen=True, 
**slots_if310()) +class Limit(Container[NumberT_co]): """ - A range of numbers used to define the allowed range of Add operations. + A range of numbers used to define the allowed range of `Sum` operations. Examples -------- @@ -383,23 +1527,30 @@ class Limit(Container["int | float"]): True """ - min: int | float | None - max: int | float | None - limit_exceeded: LimitExceededPolicy + min: NumberT_co | None = field(default=None) + max: NumberT_co | None = field(default=None) + limit_exceeded: LimitExceededPolicy = field(default=LimitExceededPolicy.ABORT) - def __init__( - self, - min: int | float | None = None, - max: int | float | None = None, - limit_exceeded: LimitExceededInput | None = LimitExceededPolicy.ERROR, - ) -> None: - object.__setattr__(self, "min", min) - object.__setattr__(self, "max", max) - object.__setattr__( + if TYPE_CHECKING: + # Customise the init signature to: + # - accept string values to init limit_exceeded + # - Hide the LimitExceededPolicy.WRAP option from the init signature so + # that using it is a type error. There's no way to use a custom wrap + # limit, only LIMIT_KVU64 is supported. + def __init__( self, - "limit_exceeded", - LimitExceededPolicy(limit_exceeded or LimitExceededPolicy.ERROR), - ) + min: NumberT_co | None = None, + max: NumberT_co | None = None, + limit_exceeded: LimitExceededInput | None = LimitExceededPolicy.ABORT, + ) -> None: + pass + + def __post_init__(self) -> None: + # Support specifying limit_exceeded via the enum's string values. 
+ if not isinstance(self.limit_exceeded, LimitExceededPolicy): + object.__setattr__( + self, "limit_exceeded", LimitExceededPolicy(self.limit_exceeded) + ) def __contains__(self, x: object) -> bool: if not isinstance(x, (int, float)): @@ -408,235 +1559,371 @@ def __contains__(self, x: object) -> bool: self.max is None or self.max >= x ) - def as_protobuf( - self, - mutation: dp_protobuf.Mutation, - *, - v8_encoder: Encoder, - value_type: type[int | float], - ) -> dp_protobuf.Mutation: - if value_type not in (int, float): - raise TypeError(f"value_type must be int or float: {value_type!r}") - - if self.min is not None: - encoded_min = bytes(v8_encoder.encode(self.min)) - Limit._validate_encoded_type( - "min", - value=self.min, - v8_value=encoded_min, - required_encoding=value_type, - ) - mutation.sum_min = encoded_min - if self.max is not None: - encoded_max = bytes(v8_encoder.encode(self.max)) - Limit._validate_encoded_type( - "max", - value=self.max, - v8_value=encoded_max, - required_encoding=value_type, - ) - mutation.sum_max = encoded_max - if self.limit_exceeded is LimitExceededPolicy.CLAMP: - mutation.sum_clamp = True - return mutation - - @staticmethod - def _validate_encoded_type( - field: Literal["min", "max"], - value: int | float, - v8_value: bytes, - required_encoding: type[int | float], - ) -> None: - assert required_encoding in (int, float) - try: - value_type = _get_number_type(_get_v8_value_tag(v8_value)) - except ValueError as e: - raise RuntimeError( - f"Limit.{field} is not None so must encode to BigInt or " - f"Number using the configured v8_encoder, but it didn't: " - f"value={value}, v8_value={v8_value!r}, error={e}" - ) from e - if value_type is not required_encoding: - raise ValueError( - f"Limit.{field} encoded to {_js_type_name(value_type)} ({value_type}) " - f"but the parent Sum's value encoded as " - f"{_js_type_name(required_encoding)} ({required_encoding}). " - "Both must encode to the same JavaScript type. 
Use int or " - "float consistently for both. If a the V8 serializer is " - "customised, check how it's encoding int and float values." - ) - - -def _js_type_name(py_type: type[int | float]) -> Literal["BigInt", "Number"]: - return "BigInt" if py_type is int else "Number" - LIMIT_KVU64 = Limit( - min=0, - max=2**64 - 1, + min=KvU64.RANGE[0], + max=KvU64.RANGE[-1], # Not normally allowed by types because only LIMIT_KVU64 can use WRAP. limit_exceeded=cast(LimitExceededInput, LimitExceededPolicy.WRAP), ) -LIMIT_UNLIMITED = Limit() +LIMIT_UNLIMITED = Limit[Any]() + + +class AmbiguousNumberWarning(UserWarning): + pass @dataclass(init=False, **slots_if310()) -class Sum(Mutation): - value: int | float | KvU64 - limit: Limit = field(default=Limit()) +class NumberMutation(Mutation, Generic[KvNumberNameT_co, NumberT_co, KvNumberTypeT_co]): + number_type: KvNumberInfo[KvNumberNameT_co, NumberT_co, KvNumberTypeT_co] def __init__( self, - key: AnyKvKey, - value: int | float | KvU64, *, - limit: Limit | None = None, + key: AnyKvKey, expire_at: datetime | None = None, + number_type: KvNumberInfo[KvNumberNameT_co, NumberT_co, KvNumberTypeT_co], ) -> None: - super(Sum, self).__init__(key, expire_at=expire_at) - - # Only KvU64 supports wrapping on boundary (and this can't be changed). 
- if isinstance(value, KvU64): - if limit is not None and limit != LIMIT_KVU64: - raise ValueError( - "limit for KvU64 cannot be changed, it must be None or LIMIT_KVU64" - ) - limit = LIMIT_KVU64 + super(NumberMutation, self).__init__(key, expire_at=expire_at) + self.number_type = number_type + + @classmethod + def _resolve_number_value_type( + cls, + value: JSBigInt | KvU64 | float | NumberT | KvNumberTypeT, + number_type: KvNumberInfo[KvNumberNameT, NumberT, KvNumberTypeT] + | KvNumberIdentifier + | None = None, + ) -> tuple[NumberT, KvNumberInfo[KvNumberNameT, NumberT, KvNumberTypeT]]: + resolved_number_type: KvNumberInfo[KvNumberNameT, NumberT, KvNumberTypeT] + if isinstance(number_type, KvNumberInfo): + resolved_number_type = number_type + elif number_type is not None: + number_identifier: KvNumberIdentifier = number_type + resolved_number_type = KvNumber.resolve(number_identifier).value # pyright: ignore[reportAssignmentType] else: - if limit is None: - limit = LIMIT_UNLIMITED - elif limit.limit_exceeded == LimitExceededPolicy.WRAP: - raise ValueError( - "limit for JavaScript BigInt or Number cannot be WRAP, it " - "must be ERROR or CLAMP" - ) - assert limit is not None + known_number = cast(KvU64 | JSBigInt | float, value) + resolved_number_type = KvNumber.resolve(number=known_number).value # pyright: ignore[reportAssignmentType] - self.value = value - self.limit = limit + resolved_value = cast(KvNumberTypeT | NumberT, value) - def as_protobuf(self, *, v8_encoder: Encoder) -> dp_protobuf.Mutation: - mutation = dp_protobuf.Mutation( - mutation_type=dp_protobuf.MutationType.M_SUM, - key=pack_key(self.key), - value=encode_kv_write_value(self.value, v8_encoder=v8_encoder), - expire_at_ms=self._expire_at_ms(), + return ( + resolved_number_type.as_py_number(resolved_value), + resolved_number_type, ) - v8_number_type = _validate_number_mutation_value(self, mutation) - if v8_number_type is not None: - assert mutation.value.encoding == dp_protobuf.ValueEncoding.VE_V8 
- # Only V8 values use the min/max limits. - self.limit.as_protobuf( - mutation, v8_encoder=v8_encoder, value_type=v8_number_type - ) - return mutation +@dataclass(init=False, **slots_if310()) +class Sum(NumberMutation[KvNumberNameT_co, NumberT_co, KvNumberTypeT_co]): + _INIT_OPTIONS: ClassVar = frozenset( + ["clamp_over", "clamp_under", "abort_over", "abort_under", "limit", "expire_at"] + ) + delta: Final[NumberT_co] # type: ignore[misc] + limit: Final[Limit[NumberT_co]] # type: ignore[misc] + @override + def as_protobuf( + self, *, v8_encoder: Encoder | None = None + ) -> Sequence[dp_protobuf.Mutation]: + return self.number_type.get_sum_mutations(self, v8_encoder=v8_encoder) -def _validate_number_mutation_value( - mut: Sum | Min | Max, mutation: dp_protobuf.Mutation -) -> type[int | float] | None: - """ - Validate the encoded numeric value of a Sum/Min/Max mutation operation. + @overload + def __init__( # pyright: ignore[reportOverlappingOverload] + self: BigIntSum, + key: AnyKvKey, + delta: JSBigInt, + number_type: None = None, + **options: Unpack[SumOptions[int]], + ) -> None: ... - If the operation value is a V8-encoded number, the return value is the int - or float type, indicating if the encoded value is BigInt or Number. - Otherwise the return value is None. - """ - if mutation.value.encoding == dp_protobuf.ValueEncoding.VE_LE64: - return None - elif mutation.value.encoding == dp_protobuf.ValueEncoding.VE_V8: - try: - value_type = _get_number_type(_get_v8_value_tag(mutation.value.data)) - except ValueError as e: - raise RuntimeError( - f"{type(mut).__name__}.value is not KvU64 so it must encode to " - f"BigInt or Number using the configured v8_encoder, but it didn't: " - f"value={mut.value!r}, v8_value={mutation.value.data!r}, error={e}" - ) from e + @overload + def __init__( + self: BigIntSum, + key: AnyKvKey, + delta: int | JSBigInt, + number_type: BigIntKvNumberIdentifier, + **options: Unpack[SumOptions[int]], + ) -> None: ... 
- return value_type + @overload + def __init__( + self: U64Sum, + key: AnyKvKey, + delta: KvU64, + number_type: None = None, + **options: Unpack[SumOptions[int]], + ) -> None: ... - raise ValueError( - f"{type(mut).__name__}.value is not a KvU64 or number that " - f"V8-serializes to BigInt or Number: value={mut.value!r}, ValueEncoding: " - f"{enum_name(dp_protobuf.ValueEncoding, mutation.value.encoding)}" - ) + @overload + def __init__( + self: U64Sum, + key: AnyKvKey, + delta: int | KvU64, + number_type: U64KvNumberIdentifier, + **options: Unpack[SumOptions[int]], + ) -> None: ... + @overload + def __init__( + self: FloatSum, + key: AnyKvKey, + delta: float, + number_type: FloatKvNumberIdentifier | None = None, + **options: Unpack[SumOptions[float]], + ) -> None: ... + + @overload + def __init__( + self: Sum[KvNumberNameT, NumberT, KvNumberTypeT], + key: AnyKvKey, + delta: NumberT | KvNumberTypeT, + number_type: KvNumberInfo[KvNumberNameT, NumberT, KvNumberTypeT], + # Can't use float limits unless the float type is explicitly being used, + # as float is incompatible with the other number types, but int is + # compatible. + **options: Unpack[SumOptions[NumberT]], + ) -> None: ... 
+ + def __init__( + self: Sum[KvNumberNameT, NumberT, KvNumberTypeT], + key: AnyKvKey, + delta: JSBigInt | KvU64 | float | NumberT | KvNumberTypeT, + number_type: KvNumberInfo[KvNumberNameT, NumberT, KvNumberTypeT] + | KvNumberIdentifier + | None = None, + **options: Unpack[SumOptions[int | float | NumberT]], + ) -> None: + if options.keys() - self._INIT_OPTIONS: + arg = next(iter(options.keys() - self._INIT_OPTIONS)) + raise TypeError( + f"Sum.__init__() got an unexpected keyword argument {arg!r}" + ) + resolved_delta, resolved_number_type = Sum._resolve_number_value_type( + delta, number_type + ) + super(Sum, self).__init__( + key=key, + expire_at=options.pop("expire_at", None), + number_type=resolved_number_type, + ) + self.limit = ( + Sum._create_limit(**cast(LimitOptions[NumberT], options)) + or resolved_number_type.default_limit + ) + resolved_number_type.validate_limit(self.limit) + self.delta = resolved_delta + + @classmethod + def _create_limit( + cls, **options: Unpack[LimitOptions[NumberT]] + ) -> Limit[NumberT] | None: + limits = dict[Literal["limit=", "clamp_*=", "abort_*="], Limit[NumberT]]() + + if limit := options.get("limit"): + limits["limit="] = limit + + if "clamp_under" in options or "clamp_over" in options: + limits["clamp_*="] = Limit( + min=options.get("clamp_under"), + max=options.get("clamp_over"), + limit_exceeded=LimitExceededPolicy.CLAMP, + ) -def _get_v8_value_tag(v8_value: bytes) -> SerializationTag: - """Inspect a V8-serialized value to determine the type of value it holds.""" - try: - rts = ReadableTagStream(v8_value) - rts.read_header() - return rts.read_tag() - except v8serialize.V8SerializeError as e: - raise ValueError("v8_value bytes does not contain a V8-encoded value") from e + if "abort_under" in options or "abort_over" in options: + limits["abort_*="] = Limit( + min=options.get("abort_under"), + max=options.get("abort_over"), + limit_exceeded=LimitExceededPolicy.ABORT, + ) + + if len(limits) > 1: + options_used = ", 
".join(sorted(limits)) + raise with_notes( + ValueError( + f"Limit keyword arguments in conflict: " + f"Options {options_used} cannot be used together." + ), + "Use limit=Limit(limit_exceeded=..., ...) to create a limit " + "with a dynamic type.", + ) + return next(iter(limits.values()), None) -def _get_number_type(tag: SerializationTag) -> type[int | float]: - """Determine the JS number type of a V8-serialized value type tag.""" - if tag is SerializationTag.kBigInt or tag is SerializationTag.kBigIntObject: - return int - elif tag in { - SerializationTag.kNumberObject, - SerializationTag.kDouble, - SerializationTag.kInt32, - SerializationTag.kUint32, - }: - return float - raise ValueError(f"tag is not a BigInt or Number: {tag}") +BigIntSum: TypeAlias = Sum[Literal["bigint"], int, JSBigInt] +FloatSum: TypeAlias = Sum[Literal["float"], float, float] +U64Sum: TypeAlias = Sum[Literal["u64"], int, KvU64] -@dataclass(**slots_if310()) -class Min(Mutation): - value: KvU64 +@dataclass(init=False, **slots_if310()) +class Min(NumberMutation[KvNumberNameT_co, NumberT_co, KvNumberTypeT_co]): + value: Final[NumberT_co] # type: ignore[misc] + + @overload + def __init__( # pyright: ignore[reportOverlappingOverload] + self: BigIntMin, + key: AnyKvKey, + value: JSBigInt, + number_type: None = None, + **options: Unpack[MutationOptions], + ) -> None: ... + @overload def __init__( - self, + self: BigIntMin, + key: AnyKvKey, + value: int | JSBigInt, + number_type: BigIntKvNumberIdentifier, + **options: Unpack[MutationOptions], + ) -> None: ... + + @overload + def __init__( + self: U64Min, + key: AnyKvKey, + value: KvU64, + number_type: None = None, + **options: Unpack[MutationOptions], + ) -> None: ... 
+ + @overload + def __init__( + self: U64Min, key: AnyKvKey, value: int | KvU64, - *, - expire_at: datetime | None = None, - ) -> None: - super(Min, self).__init__(key, expire_at=expire_at) - self.value = value if isinstance(value, KvU64) else KvU64(value) + number_type: U64KvNumberIdentifier, + **options: Unpack[MutationOptions], + ) -> None: ... - def as_protobuf(self, *, v8_encoder: Encoder) -> dp_protobuf.Mutation: - mutation = dp_protobuf.Mutation( - mutation_type=dp_protobuf.MutationType.M_MIN, - key=pack_key(self.key), - value=encode_kv_write_value(self.value, v8_encoder=v8_encoder), - expire_at_ms=self._expire_at_ms(), + @overload + def __init__( + self: FloatMin, + key: AnyKvKey, + value: float, + number_type: FloatKvNumberIdentifier | None = None, + **options: Unpack[MutationOptions], + ) -> None: ... + + @overload + def __init__( + self: Min[KvNumberNameT, NumberT, KvNumberTypeT], + key: AnyKvKey, + value: NumberT | KvNumberTypeT, + number_type: KvNumberInfo[KvNumberNameT, NumberT, KvNumberTypeT], + # Can't use float limits unless the float type is explicitly being used, + # as float is incompatible with the other number types, but int is + # compatible. + **options: Unpack[MutationOptions], + ) -> None: ... 
+ + def __init__( + self: Min[KvNumberNameT, NumberT, KvNumberTypeT], + key: AnyKvKey, + value: JSBigInt | KvU64 | float | NumberT | KvNumberTypeT, + number_type: KvNumberInfo[KvNumberNameT, NumberT, KvNumberTypeT] + | KvNumberIdentifier + | None = None, + **options: Unpack[MutationOptions], + ) -> None: + resolved_number, resolved_number_type = Min._resolve_number_value_type( + value, number_type ) - _validate_number_mutation_value(self, mutation) - return mutation + super(Min, self).__init__(key=key, number_type=resolved_number_type, **options) + self.value = resolved_number + @override + def as_protobuf(self, *, v8_encoder: Encoder) -> Sequence[dp_protobuf.Mutation]: + return self.number_type.get_min_mutations(self, v8_encoder=v8_encoder) -@dataclass(**slots_if310()) -class Max(Mutation): - value: int | float | KvU64 +BigIntMin: TypeAlias = Min[Literal["bigint"], int, JSBigInt] +FloatMin: TypeAlias = Min[Literal["float"], float, float] +U64Min: TypeAlias = Min[Literal["u64"], int, KvU64] + + +@dataclass(init=False, **slots_if310()) +class Max(NumberMutation[KvNumberNameT_co, NumberT_co, KvNumberTypeT_co]): + value: Final[NumberT_co] # type: ignore[misc] + + @overload + def __init__( # pyright: ignore[reportOverlappingOverload] + self: BigIntMax, + key: AnyKvKey, + value: JSBigInt, + number_type: None = None, + **options: Unpack[MutationOptions], + ) -> None: ... + + @overload def __init__( - self, + self: BigIntMax, + key: AnyKvKey, + value: int | JSBigInt, + number_type: BigIntKvNumberIdentifier, + **options: Unpack[MutationOptions], + ) -> None: ... + + @overload + def __init__( + self: U64Max, + key: AnyKvKey, + value: KvU64, + number_type: None = None, + **options: Unpack[MutationOptions], + ) -> None: ... 
+ + @overload + def __init__( + self: U64Max, key: AnyKvKey, value: int | KvU64, - *, - expire_at: datetime | None = None, - ) -> None: - super(Max, self).__init__(key, expire_at=expire_at) - self.value = value if isinstance(value, KvU64) else KvU64(value) + number_type: U64KvNumberIdentifier, + **options: Unpack[MutationOptions], + ) -> None: ... - def as_protobuf(self, *, v8_encoder: Encoder) -> dp_protobuf.Mutation: - mutation = dp_protobuf.Mutation( - mutation_type=dp_protobuf.MutationType.M_MAX, - key=pack_key(self.key), - value=encode_kv_write_value(self.value, v8_encoder=v8_encoder), - expire_at_ms=self._expire_at_ms(), + @overload + def __init__( + self: FloatMax, + key: AnyKvKey, + value: float, + number_type: FloatKvNumberIdentifier | None = None, + **options: Unpack[MutationOptions], + ) -> None: ... + + @overload + def __init__( + self: Max[KvNumberNameT, NumberT, KvNumberTypeT], + key: AnyKvKey, + value: NumberT | KvNumberTypeT, + number_type: KvNumberInfo[KvNumberNameT, NumberT, KvNumberTypeT], + # Can't use float limits unless the float type is explicitly being used, + # as float is incompatible with the other number types, but int is + # compatible. + **options: Unpack[MutationOptions], + ) -> None: ... 
+ + def __init__( + self: Max[KvNumberNameT, NumberT, KvNumberTypeT], + key: AnyKvKey, + value: JSBigInt | KvU64 | float | NumberT | KvNumberTypeT, + number_type: KvNumberInfo[KvNumberNameT, NumberT, KvNumberTypeT] + | KvNumberIdentifier + | None = None, + **options: Unpack[MutationOptions], + ) -> None: + resolved_number, resolved_number_type = Max._resolve_number_value_type( + value, number_type ) - _validate_number_mutation_value(self, mutation) - return mutation + super(Max, self).__init__(key=key, number_type=resolved_number_type, **options) + self.value = resolved_number + + @override + def as_protobuf(self, *, v8_encoder: Encoder) -> Sequence[dp_protobuf.Mutation]: + return self.number_type.get_max_mutations(self, v8_encoder=v8_encoder) + + +BigIntMax: TypeAlias = Max[Literal["bigint"], int, JSBigInt] +FloatMax: TypeAlias = Max[Literal["float"], float, float] +U64Max: TypeAlias = Max[Literal["u64"], int, KvU64] @dataclass(**slots_if310()) @@ -644,9 +1931,14 @@ class Delete(Mutation): def __init__(self, key: AnyKvKey) -> None: super(Delete, self).__init__(key, expire_at=None) - def as_protobuf(self, *, v8_encoder: Encoder | None = None) -> dp_protobuf.Mutation: - return dp_protobuf.Mutation( - mutation_type=dp_protobuf.MutationType.M_DELETE, key=pack_key(self.key) + @override + def as_protobuf( + self, *, v8_encoder: Encoder | None = None + ) -> tuple[dp_protobuf.Mutation]: + return ( + dp_protobuf.Mutation( + mutation_type=dp_protobuf.MutationType.M_DELETE, key=pack_key(self.key) + ), ) @@ -656,8 +1948,12 @@ def as_protobuf(self, *, v8_encoder: Encoder | None = None) -> dp_protobuf.Mutat DEFAULT_ENQUEUE_RETRY_DELAY_COUNT = 10 +class EnqueueRepresentation(SingleProtobufMessageRepresentation[dp_protobuf.Enqueue]): + __slots__ = () + + @dataclass(init=False, **slots_if310()) -class Enqueue(FrozenAfterInitDataclass): +class Enqueue(FrozenAfterInitDataclass, EnqueueRepresentation): """ A message to be async-delivered to a Deno app listening to the Kv's queue. 
@@ -704,15 +2000,18 @@ def __init__( ) self.dead_letter_keys = () if dead_letter_keys is None else dead_letter_keys - def as_protobuf(self, *, v8_encoder: Encoder) -> dp_protobuf.Enqueue: + @override + def as_protobuf(self, *, v8_encoder: Encoder) -> tuple[dp_protobuf.Enqueue]: deadline_ms = None if self.delivery_time is not None: deadline_ms = int(self.delivery_time.timestamp() * 1000) - return dp_protobuf.Enqueue( - payload=bytes(v8_encoder.encode(self.message)), - keys_if_undelivered=[pack_key(k) for k in self.dead_letter_keys], - deadline_ms=deadline_ms, - backoff_schedule=self._evaluate_backoff_schedule(), + return ( + dp_protobuf.Enqueue( + payload=bytes(v8_encoder.encode(self.message)), + keys_if_undelivered=[pack_key(k) for k in self.dead_letter_keys], + deadline_ms=deadline_ms, + backoff_schedule=self._evaluate_backoff_schedule(), + ), ) def _evaluate_backoff_schedule(self) -> Sequence[int]: diff --git a/src/denokv/kv.py b/src/denokv/kv.py index 2093843..a6993c3 100644 --- a/src/denokv/kv.py +++ b/src/denokv/kv.py @@ -11,6 +11,7 @@ from enum import auto from functools import partial from os import environ +from types import EllipsisType from types import TracebackType from typing import Literal from typing import overload @@ -27,13 +28,15 @@ from denokv._datapath_pb2 import AtomicWrite from denokv._datapath_pb2 import SnapshotRead from denokv._datapath_pb2 import SnapshotReadOutput +from denokv._kv_types import AtomicWriteRepresentationWriter +from denokv._kv_types import KvWriter +from denokv._kv_types import KvWriterWriteResult +from denokv._kv_types import WriteResultT from denokv._kv_values import KvEntry from denokv._kv_values import KvU64 from denokv._kv_values import VersionStamp from denokv._kv_writes import Check -from denokv._kv_writes import CommittedWrite from denokv._kv_writes import CompletedWrite -from denokv._kv_writes import ConflictedWrite from denokv._kv_writes import Enqueue from denokv._kv_writes import Mutation from denokv._kv_writes 
import PlannedWrite @@ -366,7 +369,7 @@ class KvFlags(Flag): @dataclass(init=False) -class Kv(AbstractAsyncContextManager["Kv", None]): +class Kv(KvWriter, AbstractAsyncContextManager["Kv", None]): """ Interface to perform requests against a Deno KV database. @@ -719,11 +722,20 @@ async def _snapshot_read( partial(datapath.snapshot_read, read=read), consistency=consistency ) + @staticmethod + def _parse_versionstamp( + value: tuple[bytes, EndpointInfo], + ) -> tuple[VersionStamp, EndpointInfo]: + raw_versionstamp, endpoint = value + return VersionStamp(raw_versionstamp), endpoint + async def _atomic_write(self, write: AtomicWrite) -> _KvAtomicWriteResult: - return await self._datapath_request( - partial(datapath.atomic_write, write=write), - consistency=ConsistencyLevel.STRONG, - ) + return ( + await self._datapath_request( + partial(datapath.atomic_write, write=write), + consistency=ConsistencyLevel.STRONG, + ) + ).map(self._parse_versionstamp) async def _datapath_request( self, @@ -787,58 +799,61 @@ async def write(self, *operations: WriteOperation) -> CompletedWrite: ... @overload async def write(self, planned_write: PlannedWrite, /) -> CompletedWrite: ... + @overload + async def write( + self, atomic_write: AtomicWriteRepresentationWriter[WriteResultT], / + ) -> WriteResultT: ... + + @overload + async def write( + self, *, protobuf_atomic_write: dp_protobuf.AtomicWrite + ) -> KvWriterWriteResult: ... + + @override async def write( - self, arg: PlannedWrite | WriteOperation | None = None, *args: WriteOperation - ) -> CompletedWrite: - if arg is None: + self, + arg: AtomicWriteRepresentationWriter[WriteResultT] + | WriteOperation + | EllipsisType = ..., # ... is a sentinel to detect 0 args + *args: WriteOperation, + protobuf_atomic_write: dp_protobuf.AtomicWrite | None = None, + ) -> CompletedWrite | WriteResultT | KvWriterWriteResult: + if protobuf_atomic_write is not None: + if arg is not ... 
or len(args) > 0: + raise TypeError( + "Kv.write() got an unexpected positional argument with " + "keyword argument 'protobuf_atomic_write'" + ) + + return await self._atomic_write(protobuf_atomic_write) + + planned_write: PlannedWrite | AtomicWriteRepresentationWriter[WriteResultT] + if arg is ...: + # arg is ... when 0 args were passed, which is OK (no operations). + # But ... when args are provided means it was passed explicitly. if args: - raise TypeError("arguments cannot be None") - # One None arg means 0 args were passed + raise TypeError("Kv.write() got an unexpected '...'") + # Note that it's OK to submit a write with no operations. We get a + # versionstamp back. Submitting a write with only checks could be + # used to check if a key has been changed without reading the value. planned_write = PlannedWrite() - elif isinstance(arg, PlannedWrite): + elif isinstance(arg, AtomicWriteRepresentationWriter): planned_write = arg if args: - raise TypeError("unexpected arguments after PlannedWrite") + raise TypeError( + "Kv.write() got unexpected arguments after 'planned_write'" + ) else: planned_write = self.atomic(arg, *args) - # Note that it's OK to submit a write with no operations. We get a - # versionstamp back. Submitting a write with only checks could be used - # to check if a key has been changed without reading the value. 
- result = await self._atomic_write( - planned_write.as_protobuf(v8_encoder=self.v8_encoder) - ) - - concrete_checks = [ - Check(key=key_ver.key, versionstamp=key_ver.versionstamp) - for key_ver in planned_write.checks - ] - - if isinstance(result, Err): - if isinstance(result.error, CheckFailure): - check_failure = result.error - return ConflictedWrite( - failed_checks=list(check_failure.failed_check_indexes), - checks=concrete_checks, - mutations=planned_write.mutations, - enqueues=planned_write.enqueues, - ) - raise result.error - - raw_versionstamp, endpoint = result.value - return CommittedWrite( - versionstamp=VersionStamp(raw_versionstamp), - checks=concrete_checks, - mutations=planned_write.mutations, - enqueues=planned_write.enqueues, - ) + return await planned_write.write(kv=self, v8_encoder=self.v8_encoder) _KvSnapshotReadResult: TypeAlias = Result[ tuple[SnapshotReadOutput, EndpointInfo], DataPathError ] _KvAtomicWriteResult: TypeAlias = Result[ - tuple[bytes, EndpointInfo], CheckFailure | DataPathError + tuple[VersionStamp, EndpointInfo], CheckFailure | DataPathError ] diff --git a/test/denokv_testing.py b/test/denokv_testing.py index a1e390d..7954aa5 100644 --- a/test/denokv_testing.py +++ b/test/denokv_testing.py @@ -221,6 +221,10 @@ class MockKvDbMessage(NamedTuple): backoff_schedule: Sequence[int] +class SumLimitExceeded(ValueError): + pass + + @dataclass class MockKvDb: entries: list[MockKvDbEntry] @@ -527,7 +531,7 @@ def _get_number_operator( "Mutation used different number types for value/sum_min/sum_max" ) boundary = ( - LimitExceededPolicy.CLAMP if mut.sum_clamp else LimitExceededPolicy.ERROR + LimitExceededPolicy.CLAMP if mut.sum_clamp else LimitExceededPolicy.ABORT ) return MutationSumOperator(min=min_, max=max_, boundary=boundary) elif mut.mutation_type == MutationType.M_MAX: @@ -556,8 +560,8 @@ def __call__(self, left: int | float, right: int | float) -> int | float: if self.boundary is LimitExceededPolicy.CLAMP: result = min else: - 
assert self.boundary is LimitExceededPolicy.ERROR - raise ValueError( + assert self.boundary is LimitExceededPolicy.ABORT + raise SumLimitExceeded( f"result of sum({left}, {right}) = {result}, which is less " f"than the minimum {min}" ) @@ -565,8 +569,8 @@ def __call__(self, left: int | float, right: int | float) -> int | float: if self.boundary is LimitExceededPolicy.CLAMP: result = max else: - assert self.boundary is LimitExceededPolicy.ERROR - raise ValueError( + assert self.boundary is LimitExceededPolicy.ABORT + raise SumLimitExceeded( f"result of sum({left}, {right}) = {result}, which is " f"greater than the maximum {max}" ) diff --git a/test/test__kv_writes__Check.py b/test/test__kv_writes__Check.py new file mode 100644 index 0000000..4d4054f --- /dev/null +++ b/test/test__kv_writes__Check.py @@ -0,0 +1,36 @@ +from v8serialize import Encoder + +from denokv import _datapath_pb2 as datapath_pb2 +from denokv._kv_values import VersionStamp +from denokv._kv_writes import Check +from denokv.kv_keys import KvKey + + +def test_constructors() -> None: + assert Check(KvKey("a"), VersionStamp(1)) == Check.for_key_with_version( + KvKey("a"), VersionStamp(1) + ) + assert Check(KvKey("a"), None) == Check.for_key_not_set(KvKey("a")) + + +def test_as_protobuf(v8_encoder: Encoder) -> None: + protobuf = ( + datapath_pb2.Check(key=bytes(KvKey("a")), versionstamp=bytes(VersionStamp(1))), + ) + # v8_encoder is optional + assert Check(KvKey("a"), VersionStamp(1)).as_protobuf() == protobuf + assert Check(KvKey("a"), VersionStamp(1)).as_protobuf(v8_encoder=None) == protobuf + + assert ( + Check(KvKey("a"), VersionStamp(1)).as_protobuf(v8_encoder=v8_encoder) + == protobuf + ) + + +def test_as_protobuf__empty_version_is_different_to_zero_version() -> None: + assert datapath_pb2.Check(key=bytes(KvKey("a"))) == datapath_pb2.Check( + key=bytes(KvKey("a")), versionstamp=b"" + ) + assert datapath_pb2.Check(key=bytes(KvKey("a"))) != datapath_pb2.Check( + versionstamp=VersionStamp(0) + ) 
diff --git a/test/test__kv_writes__CommittedWrite.py b/test/test__kv_writes__CommittedWrite.py new file mode 100644 index 0000000..c6b6ff2 --- /dev/null +++ b/test/test__kv_writes__CommittedWrite.py @@ -0,0 +1,67 @@ +from __future__ import annotations + +from datetime import datetime + +from yarl import URL + +from denokv._kv_values import VersionStamp +from denokv._kv_writes import Check +from denokv._kv_writes import CommittedWrite +from denokv._kv_writes import Enqueue +from denokv._kv_writes import Set +from denokv.auth import ConsistencyLevel +from denokv.auth import EndpointInfo +from denokv.kv_keys import KvKey +from denokv.result import is_ok + +T1 = datetime.fromisoformat("2000-01-02T03:04:05.6Z") +EP = EndpointInfo(URL("https://example.com/"), consistency=ConsistencyLevel.STRONG) + + +def test_is_AnySuccess() -> None: + assert is_ok( + CommittedWrite( + VersionStamp(1), checks=[], mutations=[], enqueues=[], endpoint=EP + ) + ) + + +def test_constructors() -> None: + instance = CommittedWrite( + VersionStamp(1), checks=[], mutations=[], enqueues=[], endpoint=EP + ) + assert instance.ok + assert instance.versionstamp == VersionStamp(1) + assert instance.checks == () + assert instance.mutations == () + assert instance.enqueues == () + assert instance.endpoint is EP + + instance = CommittedWrite( + VersionStamp(1), + checks=[Check.for_key_not_set(key=KvKey("a"))], + mutations=[Set(KvKey("a"), 42)], + enqueues=[Enqueue("Hi")], + endpoint=EP, + ) + assert instance.ok + assert instance.versionstamp == VersionStamp(1) + assert instance.checks == (Check.for_key_not_set(key=KvKey("a")),) + assert instance.mutations == (Set(KvKey("a"), 42),) + assert instance.enqueues == (Enqueue("Hi"),) + assert instance.endpoint is EP + + +def test_str_repr() -> None: + instance = CommittedWrite( + VersionStamp(1), + checks=[Check.for_key_not_set(key=KvKey("a"))], + mutations=[Set(KvKey("a"), 42)], + enqueues=[Enqueue("Hi")], + endpoint=EP, + ) + assert ( + str(instance) == "" + ) 
+ assert str(instance) == repr(instance) diff --git a/test/test__kv_writes__ConflictedWrite.py b/test/test__kv_writes__ConflictedWrite.py new file mode 100644 index 0000000..39667f6 --- /dev/null +++ b/test/test__kv_writes__ConflictedWrite.py @@ -0,0 +1,82 @@ +from datetime import datetime + +import pytest +from yarl import URL + +from denokv._kv_writes import Check +from denokv._kv_writes import ConflictedWrite +from denokv._kv_writes import Enqueue +from denokv._kv_writes import Set +from denokv.auth import ConsistencyLevel +from denokv.auth import EndpointInfo +from denokv.kv_keys import KvKey +from denokv.result import is_err + +T1 = datetime.fromisoformat("2000-01-02T03:04:05.6Z") +EP = EndpointInfo(URL("https://example.com/"), consistency=ConsistencyLevel.STRONG) + + +@pytest.fixture +def instance() -> ConflictedWrite: + checks = [ + Check.for_key_not_set(KvKey("a")), + Check.for_key_not_set(KvKey("b")), + Check.for_key_not_set(KvKey("c")), + ] + return ConflictedWrite( + failed_checks=[0, 2], + checks=list(checks), + mutations=[Set(KvKey("a"), 42)], + enqueues=[Enqueue("Hi")], + endpoint=EP, + ) + + +def test_constructor(instance: ConflictedWrite) -> None: + checks = [ + Check.for_key_not_set(KvKey("a")), + Check.for_key_not_set(KvKey("b")), + Check.for_key_not_set(KvKey("c")), + ] + instance = ConflictedWrite( + failed_checks=[0, 2], + checks=list(checks), + mutations=[Set(KvKey("a"), 42)], + enqueues=[Enqueue("Hi")], + endpoint=EP, + ) + + assert not instance.ok + assert instance.versionstamp is None + assert instance.checks == tuple(checks) + assert instance.mutations == (Set(KvKey("a"), 42),) + assert instance.enqueues == (Enqueue("Hi"),) + assert instance.endpoint is EP + + assert dict(instance.conflicts) == {KvKey("a"): checks[0], KvKey("c"): checks[2]} + assert instance.conflicts[KvKey("a")] is checks[0] + + # conflicts is immutable + with pytest.raises(TypeError): + del instance.conflicts[KvKey("a")] # type: ignore[attr-defined] + + with 
pytest.raises(ValueError, match=r"failed_checks contains out-of-bounds index"): + ConflictedWrite( + failed_checks=[0, 10], + checks=list(checks), + mutations=[Set(KvKey("a"), 42)], + enqueues=[], + endpoint=EP, + ) + + +def test_is_AnyFailure(instance: ConflictedWrite) -> None: + assert is_err(instance) + + +def test_str_repr(instance: ConflictedWrite) -> None: + assert ( + str(instance) == "" + ) + assert str(instance) == repr(instance) diff --git a/test/test__kv_writes__Delete.py b/test/test__kv_writes__Delete.py new file mode 100644 index 0000000..88d330a --- /dev/null +++ b/test/test__kv_writes__Delete.py @@ -0,0 +1,23 @@ +from datetime import datetime + +from v8serialize import Encoder + +from denokv import _datapath_pb2 as datapath_pb2 +from denokv._kv_writes import Delete +from denokv.kv_keys import KvKey + +T1 = datetime.fromisoformat("2000-01-02T03:04:05.6Z") + + +def test_constructors() -> None: + instance = Delete(KvKey("a")) + assert instance.key == KvKey("a") + + +def test_as_protobuf(v8_encoder: Encoder) -> None: + delete = Delete(KvKey("a")) + assert delete.as_protobuf(v8_encoder=v8_encoder) == ( + datapath_pb2.Mutation( + key=bytes(KvKey("a")), mutation_type=datapath_pb2.M_DELETE + ), + ) diff --git a/test/test__kv_writes__Enqueue.py b/test/test__kv_writes__Enqueue.py new file mode 100644 index 0000000..f63acf5 --- /dev/null +++ b/test/test__kv_writes__Enqueue.py @@ -0,0 +1,70 @@ +from datetime import datetime +from itertools import count +from itertools import islice + +from v8serialize import Encoder + +from denokv import _datapath_pb2 as datapath_pb2 +from denokv._kv_writes import DEFAULT_ENQUEUE_RETRY_DELAY_COUNT +from denokv._kv_writes import DEFAULT_ENQUEUE_RETRY_DELAYS +from denokv._kv_writes import Enqueue +from denokv.kv_keys import KvKey + +T1 = datetime.fromisoformat("2000-01-02T03:04:05.6Z") + + +def test_constructors() -> None: + message = {"msg": "Hi"} + instance = Enqueue(message) + assert instance.message is message + assert 
instance.delivery_time is None + assert instance.retry_delays == DEFAULT_ENQUEUE_RETRY_DELAYS + assert len(instance.dead_letter_keys) == 0 + + retry_delays = [1, 2, 3] + dead_letter_keys = (KvKey("a"),) + instance = Enqueue( + message, + delivery_time=T1, + retry_delays=retry_delays, + dead_letter_keys=dead_letter_keys, + ) + assert instance.message is message + assert instance.delivery_time == T1 + assert instance.retry_delays == retry_delays + assert instance.dead_letter_keys == dead_letter_keys + + +def test_as_protobuf__default_retry_delays(v8_encoder: Encoder) -> None: + message = {"msg": "Hi"} + instance = Enqueue(message) + (protobuf,) = instance.as_protobuf(v8_encoder=v8_encoder) + + # Default retry delays have random jitter. A fixed number are drawn from the + # backoff provider. + assert len(protobuf.backoff_schedule) == DEFAULT_ENQUEUE_RETRY_DELAY_COUNT + assert all(delay > 0 for delay in protobuf.backoff_schedule) + + +def test_as_protobuf(v8_encoder: Encoder) -> None: + message = {"msg": "Hi"} + instance = Enqueue(message, retry_delays=[]) + (protobuf,) = instance.as_protobuf(v8_encoder=v8_encoder) + assert protobuf == datapath_pb2.Enqueue(payload=bytes(v8_encoder.encode(message))) + + evaluated_backoff = [ + i * 1000 for i in islice(count(1), DEFAULT_ENQUEUE_RETRY_DELAY_COUNT) + ] + instance = Enqueue( + message, + retry_delays=count(1), + delivery_time=T1, + dead_letter_keys=[KvKey("a"), KvKey("b")], + ) + (protobuf,) = instance.as_protobuf(v8_encoder=v8_encoder) + assert protobuf == datapath_pb2.Enqueue( + payload=bytes(v8_encoder.encode(message)), + backoff_schedule=evaluated_backoff, + keys_if_undelivered=[bytes(KvKey("a")), bytes(KvKey("b"))], + deadline_ms=int(T1.timestamp() * 1000), + ) diff --git a/test/test__kv_writes__KvNumber.py b/test/test__kv_writes__KvNumber.py new file mode 100644 index 0000000..13c3817 --- /dev/null +++ b/test/test__kv_writes__KvNumber.py @@ -0,0 +1,67 @@ +from __future__ import annotations + +from dataclasses import 
FrozenInstanceError +from typing import Literal + +import pytest +from v8serialize.jstypes import JSBigInt + +from denokv._kv_values import KvU64 +from denokv._kv_writes import BigIntKvNumberInfo +from denokv._kv_writes import FloatKvNumberInfo +from denokv._kv_writes import KvNumber +from denokv._kv_writes import KvNumberInfo +from denokv._kv_writes import U64KvNumberInfo +from denokv._pycompat.typing import assert_type + + +def test_dataclass_behaviours() -> None: + assert KvNumber.bigint < KvNumber.float + assert KvNumber.float < KvNumber.u64 + assert {KvNumber.bigint: "foo"}[KvNumber.bigint] == "foo" + + assert sorted(KvNumber) == [ + KvNumber.bigint, + KvNumber.float, + KvNumber.u64, + ] + + with pytest.raises(FrozenInstanceError): + KvNumber.bigint.foo = "bar" # type: ignore[attr-defined] + + +def test_resolve() -> None: + assert KvNumber.bigint.name == "bigint" + assert ( + assert_type(KvNumber.resolve("bigint"), Literal[KvNumber.bigint]) + is KvNumber.bigint + ) + assert ( + assert_type(KvNumber.resolve("float"), Literal[KvNumber.float]) + is KvNumber.float + ) + assert assert_type(KvNumber.resolve("u64"), Literal[KvNumber.u64]) is KvNumber.u64 + assert ( + assert_type(KvNumber.resolve(JSBigInt), Literal[KvNumber.bigint]) + is KvNumber.bigint + ) + assert ( + assert_type(KvNumber.resolve(float), Literal[KvNumber.float]) is KvNumber.float + ) + assert assert_type(KvNumber.resolve(KvU64), Literal[KvNumber.u64]) is KvNumber.u64 + + +def test_types() -> None: + assert_type(KvNumber.bigint.value, BigIntKvNumberInfo) + assert_type(KvNumber.float.value, FloatKvNumberInfo) + assert_type(KvNumber.u64.value, U64KvNumberInfo) + + _t1: KvNumberInfo[Literal["bigint"], int, JSBigInt] = KvNumber.bigint.value + _t2: KvNumberInfo[Literal["float"], float, float] = KvNumber.float.value + _t3: KvNumberInfo[Literal["u64"], int, KvU64] = KvNumber.u64.value + + # name is covariant — can treat the name as str + _t4: KvNumberInfo[str, int, KvU64] = KvNumber.u64.value + # number 
params are invariant — cannot broaden the types + _t_err1: KvNumberInfo[str, int | float, KvU64] = KvNumber.u64.value # type: ignore[assignment] + _t_err2: KvNumberInfo[str, int, KvU64 | float] = KvNumber.u64.value # type: ignore[assignment] diff --git a/test/test__kv_writes__Limit.py b/test/test__kv_writes__Limit.py new file mode 100644 index 0000000..194da19 --- /dev/null +++ b/test/test__kv_writes__Limit.py @@ -0,0 +1,57 @@ +from __future__ import annotations + +from v8serialize.constants import FLOAT64_SAFE_INT_RANGE +from v8serialize.jstypes import JSBigInt + +from denokv._kv_values import KvU64 +from denokv._kv_writes import LIMIT_KVU64 +from denokv._kv_writes import LIMIT_UNLIMITED +from denokv._kv_writes import Limit +from denokv._kv_writes import LimitExceededPolicy + + +def test_constructor() -> None: + assert Limit(1, 5, "clamp").limit_exceeded is LimitExceededPolicy.CLAMP + assert Limit(1, 5, "abort").limit_exceeded is LimitExceededPolicy.ABORT + assert Limit(1, 5).limit_exceeded is LimitExceededPolicy.ABORT + assert ( + Limit(1, 5, LimitExceededPolicy.CLAMP).limit_exceeded + is LimitExceededPolicy.CLAMP + ) + assert Limit(1, 5).min == 1 + assert type(Limit(1, 5).min) is int + assert Limit(1, 5).max == 5 + assert type(Limit(1, 5).max) is int + + +def test_contains() -> None: + assert 3 in Limit(max=5) + assert -10 in Limit(max=5) + assert 5 in Limit(max=5) + assert 6 not in Limit(max=5) + assert 1 in Limit(1, 5) + assert -10 not in Limit(0, 5) + assert 10 not in Limit(0, 5) + assert 5 in Limit() + # Non-numbers are not contained + assert object() not in Limit() + + # contains works across types + assert 1.0 in Limit(FLOAT64_SAFE_INT_RANGE.start, FLOAT64_SAFE_INT_RANGE.stop - 1) + assert JSBigInt(1) in Limit( + float(FLOAT64_SAFE_INT_RANGE.start), float(FLOAT64_SAFE_INT_RANGE.stop - 1) + ) + + +def test_LIMIT_KVU64() -> None: + assert LIMIT_KVU64.limit_exceeded is LimitExceededPolicy.WRAP + assert KvU64.RANGE[0] in LIMIT_KVU64 + assert KvU64.RANGE[-1] in 
LIMIT_KVU64 + + +def test_LIMIT_UNLIMITED() -> None: + assert -(2**256) in LIMIT_UNLIMITED + assert 0 in LIMIT_UNLIMITED + assert 2**256 in LIMIT_UNLIMITED + _limit1: Limit[int] = LIMIT_UNLIMITED + _limit2: Limit[float] = LIMIT_UNLIMITED diff --git a/test/test__kv_writes__Max.py b/test/test__kv_writes__Max.py new file mode 100644 index 0000000..15b52be --- /dev/null +++ b/test/test__kv_writes__Max.py @@ -0,0 +1,111 @@ +import builtins +from datetime import datetime +from typing import Literal + +import pytest +from v8serialize import Encoder +from v8serialize.jstypes import JSBigInt + +from denokv import _datapath_pb2 as pb2 +from denokv._kv_values import KvU64 +from denokv._kv_writes import BigIntMax +from denokv._kv_writes import FloatMax +from denokv._kv_writes import KvNumber +from denokv._kv_writes import KvNumberInfo +from denokv._kv_writes import Max +from denokv._kv_writes import U64Max +from denokv._pycompat.typing import Any +from denokv._pycompat.typing import NewType +from denokv._pycompat.typing import assert_type +from denokv._pycompat.typing import cast +from denokv.kv_keys import KvKey +from test.denokv_testing import typeval + +T1 = datetime.fromisoformat("2000-01-02T03:04:05.6Z") +k = KvKey("a") + + +def test_init__float() -> None: + float_max = Max(k, 9, KvNumber.float.value) + assert float_max.key is k + assert typeval(float_max.value) == (int, 9) + assert float_max.number_type is KvNumber.float.value + + assert typeval(Max(k, 9.0).value) == (float, 9.0) + assert Max(k, 9) == float_max + assert Max(k, 9.0) == float_max + + for nt in ("float", KvNumber.float, builtins.float, KvNumber.float.value): + assert Max(k, 9, nt) == float_max + + assert Max(k, 9, "float", expire_at=T1).expire_at == T1 + + +def test_init__bigint() -> None: + bigint_max = Max(k, 9, KvNumber.bigint.value) + assert bigint_max.key is k + assert typeval(bigint_max.value) == (int, 9) + assert bigint_max.number_type is KvNumber.bigint.value + + assert Max(k, JSBigInt(9)) == 
bigint_max + + for nt in ("bigint", KvNumber.bigint, JSBigInt, KvNumber.bigint.value): + assert Max(k, 9, nt) == bigint_max + + assert Max(k, 9, "bigint", expire_at=T1).expire_at == T1 + + +def test_init__u64() -> None: + u64_max = Max(k, 9, KvNumber.u64.value) + assert u64_max.key is k + assert typeval(u64_max.value) == (int, 9) + assert u64_max.number_type is KvNumber.u64.value + + assert Max(k, KvU64(9)) == u64_max + + for nt in ("u64", KvNumber.u64, KvU64, KvNumber.u64.value): + assert Max(k, 9, nt) == u64_max + + assert Max(k, 9, "u64", expire_at=T1).expire_at == T1 + + +def test_init__overloads() -> None: + k = KvKey("a") + bigint, float, u64 = KvNumber.bigint.value, KvNumber.float.value, KvNumber.u64.value + assert assert_type(Max(k, 9), FloatMax).number_type == float + assert assert_type(Max(k, 9.0), FloatMax).number_type == float + assert assert_type(Max(k, 9, "float"), FloatMax).number_type == float + assert assert_type(Max(k, 9, KvNumber.float), FloatMax).number_type == float + assert assert_type(Max(k, 9, builtins.float), FloatMax).number_type == float + assert assert_type(Max(k, 9, float), FloatMax).number_type == float + + assert assert_type(Max(k, 9, "bigint"), BigIntMax).number_type == bigint + assert assert_type(Max(k, 9, KvNumber.bigint), BigIntMax).number_type == bigint + assert assert_type(Max(k, 9, JSBigInt), BigIntMax).number_type == bigint + assert assert_type(Max(k, 9, bigint), BigIntMax).number_type == bigint + assert assert_type(Max(k, JSBigInt(9)), BigIntMax).number_type == bigint + + assert assert_type(Max(k, 9, "u64"), U64Max).number_type == u64 + assert assert_type(Max(k, 9, KvNumber.u64), U64Max).number_type == u64 + assert assert_type(Max(k, 9, KvU64), U64Max).number_type == u64 + assert assert_type(Max(k, 9, u64), U64Max).number_type == u64 + assert assert_type(Max(k, KvU64(9)), U64Max).number_type == u64 + + FooInt = NewType("FooInt", int) + BarInt = NewType("BarInt", int) + number_info: KvNumberInfo[Literal["test"], FooInt, 
BarInt] = cast(Any, bigint) + assert ( + assert_type( + Max(k, FooInt(1), number_info), Max[Literal["test"], FooInt, BarInt] + ) + ).number_type == number_info + + +@pytest.mark.parametrize("number_type", KvNumber) +def test_as_protobuf__float(number_type: KvNumber, v8_encoder: Encoder) -> None: + mutations = Max(k, 9, number_type.value, expire_at=T1).as_protobuf( + v8_encoder=v8_encoder + ) + assert len(mutations) > 0 + # We test the effect of Max mutations elsewhere, e.g. in test_kv. + assert all(isinstance(m, pb2.Mutation) for m in mutations) diff --git a/test/test__kv_writes__Min.py b/test/test__kv_writes__Min.py new file mode 100644 index 0000000..fd7b348 --- /dev/null +++ b/test/test__kv_writes__Min.py @@ -0,0 +1,111 @@ +import builtins +from datetime import datetime +from typing import Literal + +import pytest +from v8serialize import Encoder +from v8serialize.jstypes import JSBigInt + +from denokv import _datapath_pb2 as pb2 +from denokv._kv_values import KvU64 +from denokv._kv_writes import BigIntMin +from denokv._kv_writes import FloatMin +from denokv._kv_writes import KvNumber +from denokv._kv_writes import KvNumberInfo +from denokv._kv_writes import Min +from denokv._kv_writes import U64Min +from denokv._pycompat.typing import Any +from denokv._pycompat.typing import NewType +from denokv._pycompat.typing import assert_type +from denokv._pycompat.typing import cast +from denokv.kv_keys import KvKey +from test.denokv_testing import typeval + +T1 = datetime.fromisoformat("2000-01-02T03:04:05.6Z") +k = KvKey("a") + + +def test_init__float() -> None: + float_min = Min(k, 9, KvNumber.float.value) + assert float_min.key is k + assert typeval(float_min.value) == (int, 9) + assert float_min.number_type is KvNumber.float.value + + assert typeval(Min(k, 9.0).value) == (float, 9.0) + assert Min(k, 9) == float_min + assert Min(k, 9.0) == float_min + + for nt in ("float", KvNumber.float, builtins.float, KvNumber.float.value): + assert Min(k, 9, nt) == float_min + + 
assert Min(k, 9, "float", expire_at=T1).expire_at == T1 + + +def test_init__bigint() -> None: + bigint_min = Min(k, 9, KvNumber.bigint.value) + assert bigint_min.key is k + assert typeval(bigint_min.value) == (int, 9) + assert bigint_min.number_type is KvNumber.bigint.value + + assert Min(k, JSBigInt(9)) == bigint_min + + for nt in ("bigint", KvNumber.bigint, JSBigInt, KvNumber.bigint.value): + assert Min(k, 9, nt) == bigint_min + + assert Min(k, 9, "bigint", expire_at=T1).expire_at == T1 + + +def test_init__u64() -> None: + u64_min = Min(k, 9, KvNumber.u64.value) + assert u64_min.key is k + assert typeval(u64_min.value) == (int, 9) + assert u64_min.number_type is KvNumber.u64.value + + assert Min(k, KvU64(9)) == u64_min + + for nt in ("u64", KvNumber.u64, KvU64, KvNumber.u64.value): + assert Min(k, 9, nt) == u64_min + + assert Min(k, 9, "u64", expire_at=T1).expire_at == T1 + + +def test_init__overloads() -> None: + k = KvKey("a") + bigint, float, u64 = KvNumber.bigint.value, KvNumber.float.value, KvNumber.u64.value + assert assert_type(Min(k, 9), FloatMin).number_type == float + assert assert_type(Min(k, 9.0), FloatMin).number_type == float + assert assert_type(Min(k, 9, "float"), FloatMin).number_type == float + assert assert_type(Min(k, 9, KvNumber.float), FloatMin).number_type == float + assert assert_type(Min(k, 9, builtins.float), FloatMin).number_type == float + assert assert_type(Min(k, 9, float), FloatMin).number_type == float + + assert assert_type(Min(k, 9, "bigint"), BigIntMin).number_type == bigint + assert assert_type(Min(k, 9, KvNumber.bigint), BigIntMin).number_type == bigint + assert assert_type(Min(k, 9, JSBigInt), BigIntMin).number_type == bigint + assert assert_type(Min(k, 9, bigint), BigIntMin).number_type == bigint + assert assert_type(Min(k, JSBigInt(9)), BigIntMin).number_type == bigint + + assert assert_type(Min(k, 9, "u64"), U64Min).number_type == u64 + assert assert_type(Min(k, 9, KvNumber.u64), U64Min).number_type == u64 + assert 
assert_type(Min(k, 9, KvU64), U64Min).number_type == u64 + assert assert_type(Min(k, 9, u64), U64Min).number_type == u64 + assert assert_type(Min(k, KvU64(9)), U64Min).number_type == u64 + + FooInt = NewType("FooInt", int) + BarInt = NewType("BarInt", int) + number_info: KvNumberInfo[Literal["test"], FooInt, BarInt] = cast(Any, bigint) + assert ( + assert_type( + Min(k, FooInt(1), number_info), Min[Literal["test"], FooInt, BarInt] + ) + ).number_type == number_info + + +@pytest.mark.parametrize("number_type", KvNumber) +def test_as_protobuf__float(number_type: KvNumber, v8_encoder: Encoder) -> None: + mutations = Min(k, 9, number_type.value, expire_at=T1).as_protobuf( + v8_encoder=v8_encoder + ) + assert len(mutations) > 0 + # We test the effect of Min mutations elsewhere, e.g. in test_kv. + assert all(isinstance(m, pb2.Mutation) for m in mutations) diff --git a/test/test__kv_writes__PlannedWrite.py b/test/test__kv_writes__PlannedWrite.py new file mode 100644 index 0000000..216b295 --- /dev/null +++ b/test/test__kv_writes__PlannedWrite.py @@ -0,0 +1,273 @@ +from __future__ import annotations + +import re +from datetime import datetime +from unittest.mock import create_autospec + +import pytest +from v8serialize import Encoder +from v8serialize.jstypes import JSBigInt +from yarl import URL + +from denokv import _datapath_pb2 as datapath_pb2 +from denokv._kv_types import KvWriter +from denokv._kv_types import KvWriterWriteResult +from denokv._kv_values import KvEntry +from denokv._kv_values import KvU64 +from denokv._kv_values import VersionStamp +from denokv._kv_writes import Check +from denokv._kv_writes import CommittedWrite +from denokv._kv_writes import ConflictedWrite +from denokv._kv_writes import Delete +from denokv._kv_writes import Enqueue +from denokv._kv_writes import Limit +from denokv._kv_writes import LimitExceededPolicy +from denokv._kv_writes import Max +from denokv._kv_writes import Min +from denokv._kv_writes import PlannedWrite +from 
denokv._kv_writes import Sum +from denokv._pycompat.typing import TypedDict +from denokv.auth import ConsistencyLevel +from denokv.auth import EndpointInfo +from denokv.datapath import AutoRetry +from denokv.datapath import CheckFailure +from denokv.datapath import ResponseUnsuccessful +from denokv.kv_keys import KvKey +from denokv.result import Err +from denokv.result import Ok +from test.denokv_testing import mocked + +EP = EndpointInfo(URL("https://example.com/"), consistency=ConsistencyLevel.STRONG) + + +@pytest.fixture +def planned_write() -> PlannedWrite: + return ( + PlannedWrite() + .check(KvKey("check1"), VersionStamp(1)) + .check(KvKey("check2"), VersionStamp(2)) + .check(KvKey("check3"), None) + .sum(KvKey("sum1"), 1, clamp_under=0, clamp_over=2) + .sum(KvKey("sum2"), 2, "bigint", clamp_under=0) + .sum(KvKey("sum3"), 4, "u64") + .delete(KvKey("delete1")) + .enqueue(message="Hi", retry_delays=(1, 2, 3)) + ) + + +@pytest.mark.asyncio() +async def test_as_protobuf( + v8_encoder: Encoder, +) -> None: + assert PlannedWrite().as_protobuf(v8_encoder=v8_encoder) == ( + datapath_pb2.AtomicWrite(), + ) + + T1 = datetime.fromisoformat("2000-01-01T00:00:00Z") + + planned_write_start = PlannedWrite() + planned_write = ( + planned_write_start.check(KvEntry(KvKey("check1"), None, VersionStamp(1))) + .check(Check(KvKey("check2"), VersionStamp(2))) + .check_key_not_set(KvKey("check3")) + .check_key_has_version(KvKey("check4"), VersionStamp(4)) + .sum(KvKey("sum1"), KvU64(1)) + .sum(KvKey("sum2"), 2.0, abort_under=0) + .sum(KvKey("sum3"), 0.2, clamp_under=0, clamp_over=1) + .sum(KvKey("sum4"), 4.0, limit=Limit(1.0, 3.0, LimitExceededPolicy.CLAMP)) + .mutate(Sum(KvKey("sum5"), JSBigInt(42))) + .min(KvKey("min1"), 1) + .min(KvKey("min2"), KvU64(2)) + .mutate(Min(KvKey("min3"), 3, expire_at=T1)) + .max(KvKey("max1"), 1) + .max(KvKey("max2"), KvU64(2)) + .mutate(Max(KvKey("max3"), 3, expire_at=T1)) + .delete(KvKey("delete1")) + .mutate(Delete(KvKey("delete2"))) + 
.enqueue({"event": "example1"}, delivery_time=T1, retry_delays=[1, 10, 100]) + .enqueue( + Enqueue({"event": "example2"}, delivery_time=T1, retry_delays=[1, 10, 100]) + ) + ) + assert planned_write is planned_write_start # builder methods update in-place + + assert planned_write.as_protobuf(v8_encoder=v8_encoder) == ( + datapath_pb2.AtomicWrite( + checks=[ + pb_msg + for check in [ + Check(KvKey("check1"), VersionStamp(1)), + Check(KvKey("check2"), VersionStamp(2)), + Check(KvKey("check3"), None), + Check(KvKey("check4"), VersionStamp(4)), + ] + for pb_msg in check.as_protobuf(v8_encoder=v8_encoder) + ], + mutations=[ + pb_msg + for mutation in [ + Sum(KvKey("sum1"), KvU64(1)), + Sum(KvKey("sum2"), 2.0, abort_under=0), + Sum(KvKey("sum3"), 0.2, clamp_under=0, clamp_over=1), + Sum( + KvKey("sum4"), + 4.0, + limit=Limit(1.0, 3.0, LimitExceededPolicy.CLAMP), + ), + Sum(KvKey("sum5"), JSBigInt(42)), + Min(KvKey("min1"), 1), + Min(KvKey("min2"), KvU64(2)), + Min(KvKey("min3"), 3, expire_at=T1), + Max(KvKey("max1"), 1), + Max(KvKey("max2"), KvU64(2)), + Max(KvKey("max3"), 3, expire_at=T1), + Delete(KvKey("delete1")), + Delete(KvKey("delete2")), + ] + for pb_msg in mutation.as_protobuf(v8_encoder=v8_encoder) + ], + enqueues=[ + pb_msg + for enqueue in [ + Enqueue( + {"event": "example1"}, + delivery_time=T1, + retry_delays=[1, 10, 100], + ), + Enqueue( + {"event": "example2"}, + delivery_time=T1, + retry_delays=[1, 10, 100], + ), + ] + for pb_msg in enqueue.as_protobuf(v8_encoder=v8_encoder) + ], + ), + ) + + +class AtomicWriteRepresentationWriterWriteOptions(TypedDict, total=False): + kv: KvWriter + v8_encoder: Encoder + + +@pytest.mark.asyncio() +@pytest.mark.parametrize("kv_via_write_arg", [False, True]) +@pytest.mark.parametrize("v8_encoder_via_write_arg", [False, True]) +async def test_write__handles_successful_write( + kv_via_write_arg: bool, + v8_encoder_via_write_arg: bool, + planned_write: PlannedWrite, + v8_encoder: Encoder, +) -> None: + writer: KvWriter = 
create_autospec(KvWriter) + successful_write: KvWriterWriteResult = Ok((VersionStamp(1), EP)) + mocked(writer.write).return_value = successful_write + + kwargs = AtomicWriteRepresentationWriterWriteOptions() + if kv_via_write_arg: + kwargs["kv"] = writer + else: + planned_write.kv = writer + if v8_encoder_via_write_arg: + kwargs["v8_encoder"] = v8_encoder + else: + planned_write.v8_encoder = v8_encoder + + result = await planned_write.write(**kwargs) + + versionstamp, endpoint = successful_write.value_or_raise() + assert result == CommittedWrite( + versionstamp=versionstamp, + endpoint=endpoint, + checks=planned_write.checks, + mutations=planned_write.mutations, + enqueues=planned_write.enqueues, + ) + mocked(writer.write).assert_called_once_with( + protobuf_atomic_write=planned_write.as_protobuf(v8_encoder=v8_encoder)[0] + ) + + +@pytest.mark.asyncio() +async def test_write__handles_unsuccessful_conflicted_write( + planned_write: PlannedWrite, + v8_encoder: Encoder, +) -> None: + writer: KvWriter = create_autospec(KvWriter) + failed_write: KvWriterWriteResult = Err( + error := CheckFailure( + "Not all checks required by the Atomic Write passed", + all_checks=[pb for c in planned_write.checks for pb in c.as_protobuf()], + failed_check_indexes=[0, 2], + endpoint=EP, + ) + ) + mocked(writer.write).return_value = failed_write + + result = await planned_write.write(kv=writer, v8_encoder=v8_encoder) + + assert result == ConflictedWrite( + failed_checks=list(error.failed_check_indexes), + checks=planned_write.checks, + mutations=planned_write.mutations, + enqueues=planned_write.enqueues, + endpoint=error.endpoint, + ) + + +@pytest.mark.asyncio() +async def test_write__handles_write_request_failure( + planned_write: PlannedWrite, v8_encoder: Encoder +) -> None: + writer: KvWriter = create_autospec(KvWriter) + failed_write: KvWriterWriteResult = Err( + error := ResponseUnsuccessful( + "Server rejected Data Path request indicating client error", + status=403, + 
body_text="Permission denied", + endpoint=EP, + auto_retry=AutoRetry.NEVER, + ) + ) + mocked(writer.write).return_value = failed_write + + with pytest.raises(ResponseUnsuccessful) as exc_info: + await planned_write.write(kv=writer, v8_encoder=v8_encoder) + + assert exc_info.value == error + + +@pytest.mark.asyncio() +async def test_write__requires_Kv() -> None: + with pytest.raises( + TypeError, + match=re.escape( + "PlannedWrite.write() must get a value for its 'kv' argument when " + "'self.kv' isn't set" + ), + ): + await PlannedWrite().write() + + +@pytest.mark.asyncio() +async def test_check__raises_on_invalid_use() -> None: + with pytest.raises( + TypeError, + match=r"'versionstamp' argument cannot be set when the first argument to " + r"check\(\) is an object with 'key' and 'versionstamp' attributes", + ): + PlannedWrite().check( + KvEntry(KvKey("a"), None, versionstamp=VersionStamp(1)), + versionstamp=VersionStamp(2), + ) # type: ignore[call-overload] + + with pytest.raises( + TypeError, + match=r"'versionstamp' argument cannot be set when the first argument to " + r"check\(\) is an object with an 'as_protobuf' method", + ): + PlannedWrite().check( + Check(KvKey("a"), versionstamp=VersionStamp(1)), + versionstamp=VersionStamp(2), + ) # type: ignore[call-overload] diff --git a/test/test__kv_writes__Set.py b/test/test__kv_writes__Set.py new file mode 100644 index 0000000..068747c --- /dev/null +++ b/test/test__kv_writes__Set.py @@ -0,0 +1,77 @@ +from datetime import datetime + +from v8serialize import Encoder + +from denokv import _datapath_pb2 as datapath_pb2 +from denokv._kv_values import KvU64 +from denokv._kv_writes import Set +from denokv.kv_keys import KvKey + +T1 = datetime.fromisoformat("2000-01-02T03:04:05.6Z") + + +def test_constructors() -> None: + value = {"foo": "bar"} + instance = Set(KvKey("a"), value) + assert instance.key == KvKey("a") + assert instance.value is value + assert instance.versioned is False + assert instance.expire_at is None + + 
instance = Set(KvKey("a"), value, expire_at=T1, versioned=True) + assert instance.key == KvKey("a") + assert instance.value is value + assert instance.versioned is True + assert instance.expire_at == T1 + + +def test_as_protobuf(v8_encoder: Encoder) -> None: + v8_value = {"foo": "bar"} + instance = Set(KvKey("a"), v8_value) + + assert instance.as_protobuf(v8_encoder=v8_encoder) == ( + datapath_pb2.Mutation( + mutation_type=datapath_pb2.M_SET, + key=bytes(KvKey("a")), + value=datapath_pb2.KvValue( + data=bytes(v8_encoder.encode(v8_value)), encoding=datapath_pb2.VE_V8 + ), + ), + ) + + byte_value = b"\x00\xff" + instance = Set(KvKey("a"), byte_value) + + assert instance.as_protobuf(v8_encoder=v8_encoder) == ( + datapath_pb2.Mutation( + mutation_type=datapath_pb2.M_SET, + key=bytes(KvKey("a")), + value=datapath_pb2.KvValue(data=byte_value, encoding=datapath_pb2.VE_BYTES), + ), + ) + + kvu64_value = KvU64(2) + instance = Set(KvKey("a"), kvu64_value) + + assert instance.as_protobuf(v8_encoder=v8_encoder) == ( + datapath_pb2.Mutation( + mutation_type=datapath_pb2.M_SET, + key=bytes(KvKey("a")), + value=datapath_pb2.KvValue( + data=bytes(kvu64_value), encoding=datapath_pb2.VE_LE64 + ), + ), + ) + + instance = Set(KvKey("a"), v8_value, expire_at=T1, versioned=True) + + assert instance.as_protobuf(v8_encoder=v8_encoder) == ( + datapath_pb2.Mutation( + mutation_type=datapath_pb2.M_SET_SUFFIX_VERSIONSTAMPED_KEY, + key=bytes(KvKey("a")), + value=datapath_pb2.KvValue( + data=bytes(v8_encoder.encode(v8_value)), encoding=datapath_pb2.VE_V8 + ), + expire_at_ms=int(T1.timestamp() * 1000), + ), + ) diff --git a/test/test__kv_writes__Sum.py b/test/test__kv_writes__Sum.py new file mode 100644 index 0000000..b0da85d --- /dev/null +++ b/test/test__kv_writes__Sum.py @@ -0,0 +1,393 @@ +from __future__ import annotations + +import re +from datetime import datetime +from decimal import Decimal +from math import isnan +from typing import Literal + +import pytest +from hypothesis import 
example +from hypothesis import given +from hypothesis import strategies as st +from v8serialize.constants import FLOAT64_SAFE_INT_RANGE +from v8serialize.jstypes import JSBigInt + +from denokv import _datapath_pb2 as datapath_pb2 +from denokv._kv_values import KvU64 +from denokv._kv_writes import LIMIT_KVU64 +from denokv._kv_writes import LIMIT_UNLIMITED +from denokv._kv_writes import BigIntSum +from denokv._kv_writes import FloatSum +from denokv._kv_writes import KvNumber +from denokv._kv_writes import KvNumberIdentifier +from denokv._kv_writes import KvNumberInfo +from denokv._kv_writes import KvNumberNameT +from denokv._kv_writes import KvNumberTypeT +from denokv._kv_writes import Limit +from denokv._kv_writes import LimitExceededPolicy +from denokv._kv_writes import NumberT +from denokv._kv_writes import Sum +from denokv._kv_writes import U64Sum +from denokv._pycompat.typing import Any +from denokv._pycompat.typing import NewType +from denokv._pycompat.typing import assert_type +from denokv._pycompat.typing import cast +from denokv.datapath import read_range_single +from denokv.kv_keys import KvKey +from denokv.result import Err +from denokv.result import Ok +from denokv.result import Result +from denokv.result import is_err +from test.denokv_testing import MockKvDb +from test.denokv_testing import SumLimitExceeded +from test.denokv_testing import add_entries +from test.denokv_testing import typeval +from test.denokv_testing import unsafe_parse_protobuf_kv_entry + +T1 = datetime.fromisoformat("2000-01-02T03:04:05.6Z") + +u64 = st.integers(min_value=0, max_value=KvU64.RANGE.stop - 1) +neg_u64 = st.integers(min_value=-(KvU64.RANGE.stop - 1), max_value=0) + + +def test_init__limits() -> None: + sum1 = Sum(KvKey("a"), 1) + assert sum1.limit == LIMIT_UNLIMITED + + sum2 = Sum(KvKey("a"), 1, number_type="u64") + assert sum2.limit == LIMIT_KVU64 + + with pytest.raises( + ValueError, + match=re.escape( + "Limit keyword arguments in conflict: " + "Options abort_*=, 
clamp_*=, limit= cannot be used together.\n" + "Use limit=Limit(limit_exceeded=..., ...) to create a limit with a " + "dynamic type." + ), + ): + Sum(KvKey("a"), 1, limit=Limit(), clamp_over=1, abort_under=3) + + with pytest.raises( + ValueError, + match=re.escape( + "Limit keyword arguments in conflict: " + "Options abort_*=, clamp_*= cannot be used together.\n" + "Use limit=Limit(limit_exceeded=..., ...) to create a limit with a " + "dynamic type." + ), + ): + Sum(KvKey("a"), 1, clamp_over=1, abort_under=3) + + sum4 = Sum(KvKey("a"), 1, clamp_over=98, clamp_under=2) + assert sum4.limit == Limit(min=2, max=98, limit_exceeded="clamp") + + sum4 = Sum(KvKey("a"), 1, abort_under=3, abort_over=100) + assert sum4.limit == Limit(min=3, max=100, limit_exceeded="abort") + + sum6 = Sum( + KvKey("a"), + 1, + limit=Limit(min=3, limit_exceeded="clamp"), + expire_at=datetime.now(), + ) + assert sum6.limit == Limit(min=3, limit_exceeded="clamp") + + # Passing None as a clamp/abort enables that limit type with the default + sum7 = Sum(KvKey("a"), 1, "u64", clamp_under=None) + assert sum7.limit == Limit(min=None, max=None, limit_exceeded="clamp") + + sum8 = Sum(KvKey("a"), 1, abort_over=None) + assert sum8.limit == Limit(min=None, max=None, limit_exceeded="abort") + + sum9 = Sum(KvKey("a"), 1, "u64", limit=None) + assert sum9.limit == LIMIT_KVU64 + + +def test_init__overloads() -> None: + k = KvKey("a") + bigint, float, u64 = KvNumber.bigint.value, KvNumber.float.value, KvNumber.u64.value + assert assert_type(Sum(k, JSBigInt(1)), BigIntSum).number_type == bigint + assert assert_type(Sum(k, 1, "bigint"), BigIntSum).number_type == bigint + assert assert_type(Sum(k, KvU64(1)), U64Sum).number_type == u64 + assert assert_type(Sum(k, 1, "u64"), U64Sum).number_type == u64 + assert assert_type(Sum(k, 1), FloatSum).number_type == float + assert assert_type(Sum(k, 1.0), FloatSum).number_type == float + assert assert_type(Sum(k, 1.0, "float"), FloatSum).number_type == float + + FooInt = 
NewType("FooInt", int) + BarInt = NewType("BarInt", int) + number_info: KvNumberInfo[Literal["test"], FooInt, BarInt] = cast(Any, bigint) + assert ( + assert_type( + Sum(k, FooInt(1), number_info), Sum[Literal["test"], FooInt, BarInt] + ) + ).number_type == number_info + + +@pytest.mark.parametrize( + "delta,number_type,expected_delta,expected_number_type", + [ + (1, None, (int, 1), KvNumber.float.value), + (1.0, None, (float, 1.0), KvNumber.float.value), + (1, "float", (int, 1), KvNumber.float.value), + (1.0, float, (float, 1.0), KvNumber.float.value), + (1, KvNumber.float, (int, 1), KvNumber.float.value), + (1, KvNumber.float.value, (int, 1), KvNumber.float.value), + (JSBigInt(1), None, (int, 1), KvNumber.bigint.value), + (1, "bigint", (int, 1), KvNumber.bigint.value), + (1, JSBigInt, (int, 1), KvNumber.bigint.value), + (1, KvNumber.bigint, (int, 1), KvNumber.bigint.value), + (1, KvNumber.bigint.value, (int, 1), KvNumber.bigint.value), + (KvU64(1), None, (int, 1), KvNumber.u64.value), + (1, "u64", (int, 1), KvNumber.u64.value), + (1, KvU64, (int, 1), KvNumber.u64.value), + (1, KvNumber.u64, (int, 1), KvNumber.u64.value), + (1, KvNumber.u64.value, (int, 1), KvNumber.u64.value), + ], +) +def test_init__number_types( + delta: int | float | KvU64 | JSBigInt, + number_type: KvNumberInfo | KvNumberIdentifier | None, + expected_delta: tuple[type[int], int] | tuple[type[float], float], + expected_number_type: KvNumberInfo, +) -> None: + sum = Sum(KvKey("a"), delta, cast(KvNumberInfo, number_type)) + assert typeval(sum.delta) == expected_delta + assert sum.number_type is expected_number_type + + +def test_init() -> None: + k = KvKey("a") + limit1 = Limit(0, 10, "abort") + + sum1 = Sum(k, 1.0) + assert sum1.key is k + assert typeval(sum1.delta) == (float, 1.0) + assert sum1.number_type is KvNumber.float.value + assert sum1.expire_at is None + assert sum1.limit == LIMIT_UNLIMITED + + sum2 = Sum(k, 1, "float", expire_at=T1, limit=limit1) + assert sum2.key == k + assert 
typeval(sum2.delta) == (int, 1) + assert sum2.number_type is KvNumber.float.value + assert sum2.expire_at is T1 + assert sum2.limit is limit1 + + sum3 = Sum( + key=k, delta=JSBigInt(1), number_type="bigint", expire_at=T1, limit=limit1 + ) + assert sum3.key == k + assert typeval(sum3.delta) == (int, 1) + assert sum3.number_type is KvNumber.bigint.value + assert sum3.expire_at is T1 + assert sum3.limit is limit1 + + with pytest.raises( + TypeError, + match=re.escape("Sum.__init__() got an unexpected keyword argument 'foo'"), + ): + Sum(KvKey("a"), 0, foo="bar") # type: ignore[call-overload] + + +def test_init__unsupported_value_type_is_type_error() -> None: + with pytest.raises( + TypeError, + match=re.escape("number is not supported by any KvNumber: Decimal('42')"), + ): + Sum(KvKey("a"), Decimal(42)) # type: ignore[call-overload] + + with pytest.raises( + TypeError, + match=re.escape( + "number is not compatible with bigint py number type\n" + "number: Decimal('42') (), " + "bigint=BigIntKvNumberInfo(name='bigint', py_type=, " + "kv_type=)" + ), + ): + Sum(KvKey("a"), Decimal(42), "bigint") # type: ignore[call-overload] + + +def test_init__float_number_type_rejects_out_of_range_int_values() -> None: + with pytest.raises( + ValueError, + match=re.escape( + "number is not compatible with float py number type\n" + "number: 9007199254740992 (), " + "float=FloatKvNumberInfo(name='float', py_type=, " + "kv_type=)\n" + "The int is too large to represent as a 64-bit floating point value." 
+ ), + ): + Sum(KvKey("a"), FLOAT64_SAFE_INT_RANGE.stop, "float") + + +def test_init__kvu64_limit_cannot_be_changed() -> None: + assert Sum(KvKey("a"), KvU64(1)).limit == LIMIT_KVU64 + assert Sum(KvKey("a"), KvU64(1), limit=LIMIT_KVU64) == Sum(KvKey("a"), KvU64(1)) + + custom_wrap_limit = Limit(max=42, limit_exceeded=LimitExceededPolicy.WRAP) # type: ignore[arg-type] + with pytest.raises( + ValueError, + match=re.escape( + "Number type 'u64' wrap limit's min, max bounds cannot be changed\n" + "'u64' (KvU64) can only wrap at 0 and 2^64 - 1. It can use clamp " + "with custom bounds through." + ), + ): + Sum(KvKey("a"), KvU64(1), limit=custom_wrap_limit) + + with pytest.raises( + ValueError, + match=re.escape( + "Number type 'bigint' does not support wrap limits\n" + "Use 'u64' (KvU64) to wrap on 0, 2^64 - 1 bounds." + ), + ): + Sum(KvKey("a"), 1, "bigint", limit=LIMIT_KVU64) + + +# delta values beyond +/-2^64 are wrapped to this range. We still include them +# as inputs, to ensure that we are handling them correctly though. We don't just +# use st.integers() as the input, as using the two separate u64 int classes +# should probe 64-bit boundary values more effectively than just using +# st.integers(). +@given(value=u64, delta=u64 | neg_u64 | st.integers()) +def test_as_protobuf__u64_wrap(value: int, delta: int) -> None: + expected = KvU64((value + delta) % KvU64.RANGE.stop) + sum = Sum(KvKey("a"), delta, "u64") + + actual = apply_sum_mutation(sum, value).value_or_raise() + assert actual == expected + + +@given( + value=u64, + delta=u64 | neg_u64 | st.integers(), + clamp_under=st.none() | u64, + clamp_over=st.none() | u64, +) +# Include examples to always hit branches, to avoid random coverage misses. 
+# constant result as clamp_over <= clamp_under +@example(value=0, delta=-1, clamp_under=0, clamp_over=0) +# constant result as result always meets clamp_under +@example(value=0, delta=-1, clamp_under=KvU64.RANGE.stop - 2, clamp_over=None) +def test_as_protobuf__u64_clamp( + value: int, delta: int, clamp_under: int | None, clamp_over: int | None +) -> None: + expected = KvU64( + min( + KvU64.RANGE.stop - 1 if clamp_over is None else clamp_over, + max( + 0 if clamp_under is None else clamp_under, + value + delta, + ), + ) + ) + sum = Sum(KvKey("a"), delta, "u64", clamp_under=clamp_under, clamp_over=clamp_over) + actual = apply_sum_mutation(sum, value).value_or_raise() + assert actual == expected + + +floats = st.floats(allow_nan=True) +float_safe_integers = st.integers( + min_value=FLOAT64_SAFE_INT_RANGE.start, max_value=FLOAT64_SAFE_INT_RANGE.stop - 1 +) +v8_sum_limits_bigint: st.SearchStrategy[Limit[int]] = st.builds( + Limit, + min=st.none() | st.integers(), + max=st.none() | st.integers(), + limit_exceeded=st.sampled_from( + [LimitExceededPolicy.ABORT, LimitExceededPolicy.CLAMP] + ), +) +v8_sum_limits_float: st.SearchStrategy[Limit[float]] = st.builds( + Limit, + max=st.none() | float_safe_integers | floats, + min=st.none() | float_safe_integers | floats, + limit_exceeded=st.sampled_from( + [ + LimitExceededPolicy.ABORT, + LimitExceededPolicy.CLAMP, + ] + ), +) + + +@given(value=st.integers(), delta=st.integers(), limit=v8_sum_limits_bigint) +def test_as_protobuf__v8_bigint(value: int, delta: int, limit: Limit[int]) -> None: + _test_as_protobuf__v8(KvNumber.bigint.value, value, delta, limit) + + +@given( + value=float_safe_integers | floats, + delta=float_safe_integers | floats, + limit=v8_sum_limits_float, +) +def test_as_protobuf__v8_float(value: float, delta: float, limit: Limit[float]) -> None: + _test_as_protobuf__v8(KvNumber.float.value, value, delta, limit) + + +def _test_as_protobuf__v8( + number_type: KvNumberInfo[KvNumberNameT, NumberT, KvNumberTypeT], 
+ value: NumberT, + delta: NumberT, + limit: Limit[NumberT], +) -> None: + if limit.limit_exceeded == LimitExceededPolicy.ABORT: + # Explicitly calculate the expected result in the kv type, as with + # floats, we can add int values and get greater precision than we would + # with actual floats. (Normally as_kv_type() preserves ints in + # float-safe range.) + expected_value = number_type.kv_type(value) + number_type.kv_type(delta) # type: ignore[call-arg,operator] + should_abort = False + if limit.min is not None and expected_value < limit.min: + should_abort = True + if limit.max is not None and expected_value > limit.max: + should_abort = True + else: + should_abort = False + expected_value = number_type.kv_type(value) + number_type.kv_type(delta) # type: ignore[call-arg,operator] + if limit.min is not None and expected_value < limit.min: + expected_value = limit.min + if limit.max is not None and expected_value > limit.max: + expected_value = limit.max + + sum = Sum(KvKey("a"), delta, number_type, limit=limit) + actual_result = apply_sum_mutation(sum, value) + + if should_abort: + assert is_err(actual_result) + assert isinstance(actual_result.error, SumLimitExceeded) + else: + actual_value = actual_result.value_or_raise() + assert actual_value == expected_value or all( + # We allow nan as an input, and nan can occur independently as a + # result, e.g. inf + -inf = nan. 
+ isinstance(x, float) and isnan(x) + for x in (actual_value, expected_value) + ) + + +def apply_sum_mutation( + sum: Sum[str, NumberT, KvNumberTypeT], value: NumberT +) -> Result[KvNumberTypeT, SumLimitExceeded]: + db = MockKvDb() + add_entries(db, {sum.key: sum.number_type.as_kv_number(value)}) + mutations = sum.as_protobuf() + + try: + write_result = db.atomic_write(datapath_pb2.AtomicWrite(mutations=mutations)) + except Exception as e: + if isinstance((cause := e.__cause__), SumLimitExceeded): + return Err(cause) + raise e + + assert write_result.status == datapath_pb2.AW_SUCCESS + raw_entry = db.snapshot_read_range(read_range_single(sum.key)).values[0] + entry = unsafe_parse_protobuf_kv_entry(raw_entry) + assert sum.number_type.is_kv_number(entry.value) + return Ok(entry.value) diff --git a/test/test__kv_writes__U64KvNumberType.py b/test/test__kv_writes__U64KvNumberType.py new file mode 100644 index 0000000..625b3bd --- /dev/null +++ b/test/test__kv_writes__U64KvNumberType.py @@ -0,0 +1,202 @@ +""" +Proof-of-concepts for extended atomic mutations for KvU64 numbers. + +The default KvU64 supports sum() with positive delta and wrapping at 2**64. +This module also implements: + +- sum() with negative delta and wrapping at 2**64 +- sum() with positive and negative delta, with clamping at user-defined bounds + - This is the same as BigInt + - Which makes KvU64 more powerful than BigInt to some extent + +It does not support wrapping on custom bounds, or error limits. 
+""" + +from __future__ import annotations + +from hypothesis import example +from hypothesis import given +from hypothesis import strategies as st + +from denokv._kv_values import KvU64 + +u64 = st.integers(min_value=0, max_value=KvU64.RANGE.stop - 1) +neg_u64 = st.integers(min_value=-(KvU64.RANGE.stop - 1), max_value=0) + + +def sum_with_clamp__no_overflow( + value: int, delta: int, limit_min: int | None, limit_max: int | None +) -> int: + if limit_min is None: + limit_min = 0 + if limit_max is None: + limit_max = KvU64.RANGE.stop - 1 + + assert all(x in KvU64.RANGE for x in [value, abs(delta), limit_min, limit_max]) + + result = value + delta + if limit_min is not None: + result = max(limit_min, result) + if limit_max is not None: + result = min(limit_max, result) + return result + + +def sum_with_clamp__overflow( + value: int, delta: int, limit_min: int | None, limit_max: int | None +) -> int: + if limit_min is None: + limit_min = 0 + if limit_max is None: + limit_max = KvU64.RANGE.stop - 1 + assert all( + x in KvU64.RANGE for x in [value, delta, limit_min, limit_max] if x is not None + ) + + # When the upper limit is <= the delta, the result is always clamped at the + # upper limit. Likewise if the lower limit pushes the result above the upper + # limit, the upper limit is used (it's applied last). + min_result = 0 + delta + if limit_max <= min_result or limit_max <= limit_min: + return limit_max # set to max as mutation + + if limit_min >= limit_max or limit_min <= delta: + limit_min = None + + # Can be < 0 which is not allowed in practice. 
+ # We can use high positive numbers like negative and rely on wrapping + max_start = limit_max - delta + if max_start < 0: + start = max(max_start % 2**64, value) + else: + start = min(max_start, value) + assert start in KvU64.RANGE + result = start + delta + result = result % KvU64.RANGE.stop + if limit_min is not None: + result = max(limit_min, result) + return result + + +@given(value=u64, delta=u64, limit_min=u64 | st.none(), limit_max=u64 | st.none()) +def test_sum_min_max( + value: int, delta: int, limit_min: int | None, limit_max: int | None +) -> None: + """Implement sum() with clamp for KvU64 (which can only wrap normally).""" + expected = sum_with_clamp__no_overflow(value, delta, limit_min, limit_max) + actual = sum_with_clamp__overflow(value, delta, limit_min, limit_max) + + assert actual == expected + + +# --------------------------- + + +def neg_sum_with_clamp__overflow( + value: int, + delta: int, + limit_min: int | None, + limit_max: int | None, +) -> int: + if limit_max is None: + limit_max = KvU64.RANGE.stop - 1 + if limit_min is None: + limit_min = 0 + assert delta <= 0 + + assert all(x in KvU64.RANGE for x in [value, abs(delta), limit_min, limit_max]) + + # If value after adding the delta is always <= the lower limit, the lower + # limit is always the result. However the upper limit applies last, so if + # the upper limit is lower than the lower limit, it applies instead. 
+ if limit_max <= limit_min: + return limit_max # set to limit_max as mutation + max_result = (KvU64.RANGE.stop - 1) + delta + if limit_min >= max_result: + assert limit_max > limit_min + return limit_min # set to limit_min as mutation + + if limit_max >= max_result: + assert limit_max > limit_min + # limit_max can have no effect on the result + limit_max = None + + # Offset the start to prevent it going negative after adding the delta + min_start = abs(delta) + limit_min + if min_start >= KvU64.RANGE.stop: + start = min(min_start % KvU64.RANGE.stop, value) + else: + start = max(min_start, value) + assert start in KvU64.RANGE + + # Make the negative delta to a positive value that overflows to the original + # negative delta offset. + if delta < 0: + delta = KvU64.RANGE.stop + delta + assert delta in KvU64.RANGE + + # Apply the delta (effectively subtracting) + result = (start + delta) % (KvU64.RANGE.stop) + assert result in KvU64.RANGE + + if limit_max is not None: + result = min(limit_max, result) + return result + + +@given(value=u64, delta=neg_u64, limit_min=u64 | st.none(), limit_max=u64 | st.none()) +@example(value=2**64 - 1, delta=-1, limit_min=0, limit_max=2**64 - 3) +def test_negative_sum_min_max( + value: int, delta: int, limit_min: int | None, limit_max: int | None +) -> None: + """ + Implement sum() with negative delta for KvU64 with clamp min/max. + + UvU64 sum can only add positive values with wrapping normally. 
+ """ + expected = sum_with_clamp__no_overflow(value, delta, limit_min, limit_max) + actual = neg_sum_with_clamp__overflow(value, delta, limit_min, limit_max) + + assert actual == expected + + +# --------------------------- + + +def neg_sum_with_wrap__no_overflow(value: int, delta: int) -> int: + assert delta <= 0 + assert all(x in KvU64.RANGE for x in [value, abs(delta)] if x is not None) + + result = (value + delta) % (2**64) + assert result in KvU64.RANGE + return result + + +def neg_sum_with_wrap__overflow( + value: int, + delta: int, +) -> int: + assert delta <= 0 + + assert all(x in KvU64.RANGE for x in [value, abs(delta)]) + + if delta == 0: + return value # no mutation + + delta = 2**64 + delta + assert delta in KvU64.RANGE + + result = (value + delta) % (2**64) + assert result in KvU64.RANGE + + return result + + +# TODO: can we do wrapping on custom limits, not just 0 and 2**64? +@given(value=u64, delta=neg_u64) +def test_negative_sum_with_wrap(value: int, delta: int) -> None: + """Implement sum() with negative delta for KvU64 (with wrapping).""" + expected = neg_sum_with_wrap__no_overflow(value, delta) + actual = neg_sum_with_wrap__overflow(value, delta) + + assert actual == expected diff --git a/test/test_kv.py b/test/test_kv.py index a6e7f93..3406fbe 100644 --- a/test/test_kv.py +++ b/test/test_kv.py @@ -28,6 +28,7 @@ from hypothesis import settings from hypothesis import strategies as st from v8serialize import Decoder +from v8serialize.jstypes import JSBigInt from v8serialize.jstypes import JSMap from yarl import URL @@ -42,8 +43,9 @@ from denokv._kv_values import KvU64 from denokv._kv_values import VersionStamp from denokv._kv_writes import DEFAULT_ENQUEUE_RETRY_DELAY_COUNT +from denokv._kv_writes import LIMIT_KVU64 from denokv._kv_writes import Limit -from denokv._kv_writes import LimitExceededPolicy +from denokv._kv_writes import SumArgs from denokv._pycompat.enum import StrEnum from denokv._pycompat.typing import Any from denokv._pycompat.typing 
import AsyncGenerator @@ -333,7 +335,7 @@ def client_session(client: TestClient) -> aiohttp.ClientSession: return client.session -@pytest.fixture(params=[1, 2, 3]) +@pytest.fixture(params=[1, 2, 3], ids=lambda v: f"datapath_v{v}") def datapath_version(request: pytest.FixtureRequest) -> Literal[1, 2, 3]: assert request.param in (1, 2, 3) return cast(Literal[1, 2, 3], request.param) @@ -1040,39 +1042,42 @@ async def validate_write_outcome( "initial_val, sum_val, sum_kwargs, result", [ (12, 3, {}, 15), + (12, 3.5, {}, 15.5), + (JSBigInt(12), JSBigInt(3), {}, JSBigInt(15)), (12, -3, {}, 9), + (12, -3.5, {}, 8.5), + (JSBigInt(12), JSBigInt(-3), {}, JSBigInt(9)), (None, 3, {}, 3), - (12.5, 2.5, {}, 15.0), - (12.5, -2.5, {}, 10.0), + (12.5, 2.5, {}, 15), + (12.5, -2.5, {}, 10), (None, 2.5, {}, 2.5), (None, -2.5, {}, -2.5), (KvU64(12), KvU64(3), {}, KvU64(15)), - (KvU64(12), 3, {}, KvU64(15)), - (KvU64(12), -3, {}, KvU64(9)), + (KvU64(12), 3, SumArgs(number_type='u64'), KvU64(15)), + (KvU64(12), -3, SumArgs(number_type='u64'), KvU64(9)), (None, KvU64(3), {}, KvU64(3)), # KvU64 wraps on overflow - (KvU64(1), -3, {}, KvU64(2**64 - 2)), - (KvU64(2**64 - 2), 3, {}, KvU64(1)), + (KvU64(1), -3, SumArgs(number_type='u64'), KvU64(2**64 - 2)), + (KvU64(2**64 - 2), 3, SumArgs(number_type='u64'), KvU64(1)), # Limits - (12, 10, dict(limit_min=10, limit_max=20, limit_exceeded="clamp"), 20), - (12, -10, dict(limit_min=10, limit_max=20, limit_exceeded="clamp"), 10), - (12.0, 10.0, dict(limit_min=10.0, limit_max=20.0, limit_exceeded="clamp"), 20.0), # noqa: E501 - (12.0, -10.0, dict(limit_min=10.0, limit_max=20.0, limit_exceeded="clamp"), 10.0), # noqa: E501 + (JSBigInt(12), JSBigInt(10), SumArgs(clamp_under=10, clamp_over=20), JSBigInt(20)), # noqa: E501 + (JSBigInt(12), JSBigInt(-10), SumArgs(clamp_under=10, clamp_over=20), JSBigInt(10)), # noqa: E501 + (12.0, 10.0, SumArgs(clamp_under=10.0, clamp_over=20.0), 20), + (12.0, -10.0, SumArgs(clamp_under=10.0, clamp_over=20.0), 10), + 
(KvU64(12), KvU64(10), SumArgs(clamp_under=10, clamp_over=20), KvU64(20)), + (KvU64(12), -10, SumArgs(number_type='u64', clamp_under=10, clamp_over=20), KvU64(10)), # noqa: E501 # limit via Limit object - (12, -10, dict(limit=Limit(10, 20, 'clamp')), 10), - # kwargs override the limit object fields - (12, 10, dict(limit=Limit(9, 21, 'error'), limit_min=10, limit_max=20, limit_exceeded='clamp'), 20), # noqa: E501 - (12, -10, dict(limit=Limit(9, 21, 'error'), limit_min=10, limit_max=20, limit_exceeded='clamp'), 10), # noqa: E501 + (12, -10, SumArgs(limit=Limit(10, 20, 'clamp')), 10), # overflow with limit_exceeded error causes write to fail with client error - pytest.param(12, 10, dict(limit_min=10, limit_max=20, limit_exceeded="error"), match_client_error("Mutation is not a valid M_SUM operation"), id='err-limit-high-BigInt'), # noqa: E501 - pytest.param(12, -10, dict(limit_min=10, limit_max=20, limit_exceeded="error"), match_client_error("Mutation is not a valid M_SUM operation"), id='err-limit-low-BigInt'), # noqa: E501 - pytest.param(12.0, 10.0, dict(limit_min=10.0, limit_max=20.0, limit_exceeded="error"), match_client_error("Mutation is not a valid M_SUM operation"), id='err-limit-high-Number'), # noqa: E501 - pytest.param(12.0, -10.0, dict(limit_min=10.0, limit_max=20.0, limit_exceeded="error"), match_client_error("Mutation is not a valid M_SUM operation"), id='err-limit-low-Number'), # noqa: E501 + pytest.param(12, 10, SumArgs(abort_under=10, abort_over=20 ), match_client_error("Mutation is not a valid M_SUM operation"), id='err-limit-high-BigInt'), # noqa: E501 + pytest.param(12, -10, SumArgs(abort_under=10, abort_over=20 ), match_client_error("Mutation is not a valid M_SUM operation"), id='err-limit-low-BigInt'), # noqa: E501 + pytest.param(12.0, 10.0, SumArgs(abort_under=10.0, abort_over=20.0 ), match_client_error("Mutation is not a valid M_SUM operation"), id='err-limit-high-Number'), # noqa: E501 + pytest.param(12.0, -10.0, SumArgs(abort_under=10.0, 
abort_over=20.0 ), match_client_error("Mutation is not a valid M_SUM operation"), id='err-limit-low-Number'), # noqa: E501 # Cannot use limit_exceeded other than wrap for KvU64 - pytest.param(KvU64(12), KvU64(1), dict(limit_exceeded="error"), lambda e: isinstance(e, ValueError) and "limit for KvU64 cannot be changed, it must be None or LIMIT_KVU64" == str(e), id='err-invalid-exceeded-KvU64'), # noqa: E501 + pytest.param(KvU64(12), KvU64(1), SumArgs(abort_over=100), lambda e: isinstance(e, ValueError) and "Number type 'u64' does not support abort limits" == str(e), id='err-invalid-exceeded-KvU64'), # noqa: E501 # Cannot use limit_exceeded wrap for BigInt/Number - pytest.param(1, 1, dict(limit_exceeded=LimitExceededPolicy.WRAP), match_error(ValueError, "limit for JavaScript BigInt or Number cannot be WRAP, it must be ERROR or CLAMP"), id='err-invalid-exceeded-BigInt'), # noqa: E501 - pytest.param(1.0, 1.0, dict(limit_exceeded=LimitExceededPolicy.WRAP), match_error(ValueError, "limit for JavaScript BigInt or Number cannot be WRAP, it must be ERROR or CLAMP"), id='err-invalid-exceeded-Number'), # noqa: E501 + pytest.param(1, JSBigInt(1), SumArgs(limit=LIMIT_KVU64), match_error(ValueError, "Number type 'bigint' does not support wrap limits"), id='err-invalid-exceeded-BigInt'), # noqa: E501 + pytest.param(1.0, 1.0, SumArgs(limit=LIMIT_KVU64), match_error(ValueError, "Number type 'float' does not support wrap limits"), id='err-invalid-exceeded-Number'), # noqa: E501 ], ) # fmt: on @@ -1080,27 +1085,75 @@ async def validate_write_outcome( @pytest_mark_asyncio async def test_Kv_write__sum( kv: Kv, - initial_val: int | float | KvU64 | None, - sum_val: int | float | KvU64, - sum_kwargs: dict[str, Any], + initial_val: int | float | JSBigInt | KvU64 | None, + sum_val: int | float | JSBigInt | KvU64, + sum_kwargs: SumArgs[Any, Any, Any], result: int | float | KvU64 | Callable[[Exception], bool], ) -> None: async with validate_write_outcome(kv, initial_val, result) as (kv, key): 
- assert is_ok(await kv.atomic().sum(key, sum_val, **sum_kwargs).write()) + sum_args = SumArgs(key=key, delta=sum_val, **sum_kwargs) + assert is_ok(await kv.atomic().sum(**sum_args).write()) # type: ignore[arg-type] @pytest.mark.parametrize( "initial_val, max_val, max_kwargs, result", [ + (JSBigInt(12), JSBigInt(3), {}, JSBigInt(12)), + (JSBigInt(3), JSBigInt(12), {}, JSBigInt(12)), + (12.5, 3, {}, 12.5), + (3, 12.5, {}, 12.5), (KvU64(12), KvU64(3), {}, KvU64(12)), (KvU64(3), KvU64(12), {}, KvU64(12)), - # Cannot use max() on non KvU64 stored value - (3, KvU64(12), {}, match_client_error("SnapshotWrite is not valid")), + ( + JSBigInt(1), + 2.0, + {}, + # The errors reference M_SUM because bigint/number implement min/max + # using clamped M_SUM operations, not the actual M_MIN/M_MAX, + # because they only support u64. + match_error( + ResponseUnsuccessful, + "SnapshotWrite is not valid: " + "Cannot apply operation M_SUM, number types are incompatible: " + "current type: JSBigInt (VE_V8 BigInt), " + "operand type: int/float (VE_V8 Number)", + ), + ), + ( + 1.5, + JSBigInt(2), + {}, + match_error( + ResponseUnsuccessful, + "SnapshotWrite is not valid: " + "Cannot apply operation M_SUM, number types are incompatible: " + "current type: int/float (VE_V8 Number), " + "operand type: JSBigInt (VE_V8 BigInt)", + ), + ), ( KvU64(1), 2.0, {}, - match_error(TypeError, "value must be 8 bytes or a 64-bit unsigned int"), + match_error( + ResponseUnsuccessful, + "SnapshotWrite is not valid: " + "Cannot apply operation M_SUM, number types are incompatible: " + "current type: KvU64 (VE_LE64), " + "operand type: int/float (VE_V8 Number)", + ), + ), + ( + 2.0, + KvU64(1), + {}, + match_error( + ResponseUnsuccessful, + "SnapshotWrite is not valid: " + "Cannot apply operation M_MAX, number types are incompatible: " + "current type: int/float (VE_V8 Number), " + "operand type: KvU64 (VE_LE64)", + ), ), ], ) @@ -1119,15 +1172,62 @@ async def test_Kv_write__max( @pytest.mark.parametrize( 
"initial_val, min_val, min_kwargs, result", [ + (JSBigInt(12), JSBigInt(3), {}, JSBigInt(3)), + (JSBigInt(3), JSBigInt(12), {}, JSBigInt(3)), + (12, 3.1, {}, 3.1), + (3.1, 12, {}, 3.1), (KvU64(12), KvU64(3), {}, KvU64(3)), (KvU64(3), KvU64(12), {}, KvU64(3)), - # Cannot use min() on non KvU64 stored value - (3, KvU64(12), {}, match_client_error("SnapshotWrite is not valid")), + ( + JSBigInt(1), + 2.0, + {}, + # The errors reference M_SUM because bigint/number implement min/max + # using clamped M_SUM operations, not the actual M_MIN/M_MAX, + # because they only support u64. + match_error( + ResponseUnsuccessful, + "SnapshotWrite is not valid: " + "Cannot apply operation M_SUM, number types are incompatible: " + "current type: JSBigInt (VE_V8 BigInt), " + "operand type: int/float (VE_V8 Number)", + ), + ), + ( + 1.5, + JSBigInt(2), + {}, + match_error( + ResponseUnsuccessful, + "SnapshotWrite is not valid: " + "Cannot apply operation M_SUM, number types are incompatible: " + "current type: int/float (VE_V8 Number), " + "operand type: JSBigInt (VE_V8 BigInt)", + ), + ), ( KvU64(1), 2.0, {}, - match_error(TypeError, "value must be 8 bytes or a 64-bit unsigned int"), + match_error( + ResponseUnsuccessful, + "SnapshotWrite is not valid: " + "Cannot apply operation M_SUM, number types are incompatible: " + "current type: KvU64 (VE_LE64), " + "operand type: int/float (VE_V8 Number)", + ), + ), + ( + 2.0, + KvU64(1), + {}, + match_error( + ResponseUnsuccessful, + "SnapshotWrite is not valid: " + "Cannot apply operation M_MIN, number types are incompatible: " + "current type: int/float (VE_V8 Number), " + "operand type: KvU64 (VE_LE64)", + ), ), ], ) From 8169df96c0523ea77da4b8594729b58fa2f0ad09 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Fri, 17 Jan 2025 03:49:13 +0000 Subject: [PATCH 39/52] refactor: extract PlannedWrite methods as mixins The set()/sum()/delete() etc methods of PlannedWrite are now implemented in individual mixin classes. 
This will allow us to implement the same set of methods on the Kv class, without duplicating all the horrific @overload annotations. --- src/denokv/_kv_writes.py | 313 +++++++++++++++----------- test/test__kv_writes__PlannedWrite.py | 2 + 2 files changed, 185 insertions(+), 130 deletions(-) diff --git a/src/denokv/_kv_writes.py b/src/denokv/_kv_writes.py index 89324fa..eabab99 100644 --- a/src/denokv/_kv_writes.py +++ b/src/denokv/_kv_writes.py @@ -75,6 +75,9 @@ ) KvNumberTypeT_co = TypeVar("KvNumberTypeT_co", covariant=True, default=object) U = TypeVar("U") +MutateResultT = TypeVar("MutateResultT") +EnqueueResultT = TypeVar("EnqueueResultT") +CheckResultT = TypeVar("CheckResultT") @total_ordering @@ -821,128 +824,67 @@ class SumArgs( ) -@dataclass -class PlannedWrite(AtomicWriteRepresentationWriter["CompletedWrite"]): - kv: KvWriter | None = field(default=None) - checks: MutableSequence[CheckRepresentation] = field(default_factory=list) - mutations: MutableSequence[MutationRepresentation] = field(default_factory=list) - enqueues: MutableSequence[EnqueueRepresentation] = field(default_factory=list) - v8_encoder: Encoder | None = field(default=None, kw_only=True) - - @override - async def write( - self, kv: KvWriter | None = None, *, v8_encoder: Encoder | None = None - ) -> CompletedWrite: - _kv = self.kv if kv is None else kv - if _kv is None: - raise TypeError( - f"{type(self).__name__}.write() must get a value for its 'kv' " - "argument when 'self.kv' isn't set" - ) - - _v8_encoder = self.v8_encoder if v8_encoder is None else v8_encoder - if _v8_encoder is None: - _v8_encoder = get_v8_encoder(_kv).value_or(None) - if _v8_encoder is None: - raise TypeError( - f"{type(self).__name__}.write() must get a value for its " - "'v8_encoder' keyword argument when 'self.v8_encoder' isn't " - "set and 'kv' does not provide one." 
- ) - - (pb_atomic_write,) = self.as_protobuf(v8_encoder=_v8_encoder) - # Copy the write components so that the results are not affected if the - # PlannedWrite is modified during this write. - checks = tuple(self.checks) - mutations = tuple(self.mutations) - enqueues = tuple(self.enqueues) - result = await _kv.write(protobuf_atomic_write=pb_atomic_write) - - if is_err(result): - if isinstance(result.error, CheckFailure): - check_failure = result.error - return ConflictedWrite( - failed_checks=list(check_failure.failed_check_indexes), - checks=checks, - mutations=mutations, - enqueues=enqueues, - endpoint=check_failure.endpoint, - ) - raise result.error - - versionstamp, endpoint = result.value - return CommittedWrite( - versionstamp=versionstamp, - checks=checks, - mutations=mutations, - enqueues=enqueues, - endpoint=endpoint, - ) - - def as_protobuf(self, *, v8_encoder: Encoder) -> tuple[AtomicWrite]: - return ( - AtomicWrite( - checks=[ - pb_msg - for check in self.checks - for pb_msg in check.as_protobuf(v8_encoder=v8_encoder) - ], - mutations=[ - pb_msg - for mut in self.mutations - for pb_msg in mut.as_protobuf(v8_encoder=v8_encoder) - ], - enqueues=[ - pb_msg - for enq in self.enqueues - for pb_msg in enq.as_protobuf(v8_encoder=v8_encoder) - ], - ), - ) +class CheckMixin(Generic[CheckResultT]): + @abstractmethod + def _check(self, check: CheckRepresentation, /) -> CheckResultT: + raise NotImplementedError @overload - def check(self, key: AnyKvKey, versionstamp: VersionStamp | None) -> Self: ... + def check( + self, key: AnyKvKey, versionstamp: VersionStamp | None = None + ) -> CheckResultT: ... @overload - def check(self, check: CheckRepresentation, /) -> Self: ... + def check(self, check: CheckRepresentation, /) -> CheckResultT: ... @overload - def check(self, check: AnyKeyVersion, /) -> Self: ... + def check(self, check: AnyKeyVersion, /) -> CheckResultT: ... 
def check( self, key: CheckRepresentation | AnyKeyVersion | AnyKvKey, versionstamp: VersionStamp | None = None, - ) -> Self: + ) -> CheckResultT: if isinstance(key, CheckRepresentation): if versionstamp is not None: raise TypeError( "'versionstamp' argument cannot be set when the first argument " "to check() is an object with an 'as_protobuf' method" ) - self.checks.append(key) + return self._check(key) elif isinstance(key, AnyKeyVersion): if versionstamp is not None: raise TypeError( "'versionstamp' argument cannot be set when the first argument " "to check() is an object with 'key' and 'versionstamp' attributes" ) - self.checks.append(Check(key.key, key.versionstamp)) + return self._check(Check(key.key, key.versionstamp)) else: - self.checks.append(Check(key, versionstamp)) - return self + return self._check(Check(key, versionstamp)) - def check_key_has_version(self, key: AnyKvKey, versionstamp: VersionStamp) -> Self: - self.checks.append(Check.for_key_with_version(key, versionstamp)) - return self + def check_key_has_version( + self, key: AnyKvKey, versionstamp: VersionStamp + ) -> CheckResultT: + return self._check(Check.for_key_with_version(key, versionstamp)) - def check_key_not_set(self, key: AnyKvKey) -> Self: - self.checks.append(Check.for_key_not_set(key)) - return self + def check_key_not_set(self, key: AnyKvKey) -> CheckResultT: + return self._check(Check.for_key_not_set(key)) - def set(self, key: AnyKvKey, value: object, *, versioned: bool = False) -> Self: + +class MutatorMixin(Generic[MutateResultT]): + @abstractmethod + def mutate(self, mutation: MutationRepresentation) -> MutateResultT: + raise NotImplementedError + + +class SetMutatorMixin(MutatorMixin[MutateResultT]): + def set( + self, key: AnyKvKey, value: object, *, versioned: bool = False + ) -> MutateResultT: return self.mutate(Set(key, value, versioned=versioned)) + +class SumMutatorMixin(MutatorMixin[MutateResultT]): # The overloads here have two categories: Firstly overloads based on 
known # Known KvNumber enum numbers — bigint, float and u64. Secondly, # generic/catch-all for any KvNumberInfo instance. @@ -953,7 +895,7 @@ def sum( delta: JSBigInt, number_type: None = None, **options: Unpack[SumOptions[int]], - ) -> Self: ... + ) -> MutateResultT: ... @overload def sum( @@ -962,7 +904,7 @@ def sum( delta: int | JSBigInt, number_type: BigIntKvNumberIdentifier, **options: Unpack[SumOptions[int]], - ) -> Self: ... + ) -> MutateResultT: ... @overload def sum( @@ -971,7 +913,7 @@ def sum( delta: KvU64, number_type: None = None, **options: Unpack[SumOptions[int]], - ) -> Self: ... + ) -> MutateResultT: ... @overload def sum( @@ -980,7 +922,7 @@ def sum( delta: int | KvU64, number_type: U64KvNumberIdentifier, **options: Unpack[SumOptions[int]], - ) -> Self: ... + ) -> MutateResultT: ... @overload def sum( @@ -989,7 +931,7 @@ def sum( delta: float, number_type: FloatKvNumberIdentifier | None = None, **options: Unpack[SumOptions[float]], - ) -> Self: ... + ) -> MutateResultT: ... @overload def sum( @@ -1001,7 +943,7 @@ def sum( # as float is incompatible with the other number types, but int is # compatible. **options: Unpack[SumOptions[NumberT]], - ) -> Self: ... + ) -> MutateResultT: ... 
def sum( self, @@ -1011,7 +953,7 @@ def sum( | KvNumberIdentifier | None = None, **options: Unpack[SumOptions[NumberT]], - ) -> Self: + ) -> MutateResultT: delta = cast(NumberT | KvNumberTypeT, delta) number_type = cast( KvNumberInfo[KvNumberNameT, NumberT, KvNumberTypeT], number_type @@ -1023,7 +965,7 @@ def sum_bigint( key: AnyKvKey, delta: int | JSBigInt, **options: Unpack[SumOptions[int]], - ) -> Self: + ) -> MutateResultT: return self.sum(key, delta, number_type=KvNumber.bigint, **options) def sum_float( @@ -1031,7 +973,7 @@ def sum_float( key: AnyKvKey, delta: float, **options: Unpack[SumOptions[float]], - ) -> Self: + ) -> MutateResultT: return self.sum(key, delta, number_type=KvNumber.float, **options) def sum_kvu64( @@ -1039,9 +981,11 @@ def sum_kvu64( key: AnyKvKey, delta: int | KvU64, **options: Unpack[SumOptions[int]], - ) -> Self: + ) -> MutateResultT: return self.sum(key, delta, number_type=KvNumber.u64, **options) + +class MinMutatorMixin(MutatorMixin[MutateResultT]): @overload def min( self, @@ -1049,7 +993,7 @@ def min( value: JSBigInt, number_type: None = None, **options: Unpack[MutationOptions], - ) -> Self: ... + ) -> MutateResultT: ... @overload def min( @@ -1058,7 +1002,7 @@ def min( value: int | JSBigInt, number_type: BigIntKvNumberIdentifier, **options: Unpack[MutationOptions], - ) -> Self: ... + ) -> MutateResultT: ... @overload def min( @@ -1067,7 +1011,7 @@ def min( value: KvU64, number_type: None = None, **options: Unpack[MutationOptions], - ) -> Self: ... + ) -> MutateResultT: ... @overload def min( @@ -1076,7 +1020,7 @@ def min( value: int | KvU64, number_type: U64KvNumberIdentifier, **options: Unpack[MutationOptions], - ) -> Self: ... + ) -> MutateResultT: ... @overload def min( @@ -1085,7 +1029,7 @@ def min( value: float, number_type: FloatKvNumberIdentifier | None = None, **options: Unpack[MutationOptions], - ) -> Self: ... + ) -> MutateResultT: ... 
@overload def min( @@ -1097,7 +1041,7 @@ def min( # as float is incompatible with the other number types, but int is # compatible. **options: Unpack[MutationOptions], - ) -> Self: ... + ) -> MutateResultT: ... def min( self, @@ -1107,7 +1051,7 @@ def min( | KvNumberIdentifier | None = None, **options: Unpack[MutationOptions], - ) -> Self: + ) -> MutateResultT: value = cast(NumberT | KvNumberTypeT, value) number_type = cast( KvNumberInfo[KvNumberNameT, NumberT, KvNumberTypeT], number_type @@ -1119,7 +1063,7 @@ def min_bigint( key: AnyKvKey, value: int | JSBigInt, **options: Unpack[MutationOptions], - ) -> Self: + ) -> MutateResultT: return self.min(key, value, number_type=KvNumber.bigint, **options) def min_float( @@ -1127,7 +1071,7 @@ def min_float( key: AnyKvKey, value: float, **options: Unpack[MutationOptions], - ) -> Self: + ) -> MutateResultT: return self.min(key, value, number_type=KvNumber.float, **options) def min_kvu64( @@ -1135,9 +1079,11 @@ def min_kvu64( key: AnyKvKey, value: int | KvU64, **options: Unpack[MutationOptions], - ) -> Self: + ) -> MutateResultT: return self.min(key, value, number_type=KvNumber.u64, **options) + +class MaxMutatorMixin(MutatorMixin[MutateResultT]): @overload def max( self, @@ -1145,7 +1091,7 @@ def max( value: JSBigInt, number_type: None = None, **options: Unpack[MutationOptions], - ) -> Self: ... + ) -> MutateResultT: ... @overload def max( @@ -1154,7 +1100,7 @@ def max( value: int | JSBigInt, number_type: BigIntKvNumberIdentifier, **options: Unpack[MutationOptions], - ) -> Self: ... + ) -> MutateResultT: ... @overload def max( @@ -1163,7 +1109,7 @@ def max( value: KvU64, number_type: None = None, **options: Unpack[MutationOptions], - ) -> Self: ... + ) -> MutateResultT: ... @overload def max( @@ -1172,7 +1118,7 @@ def max( value: int | KvU64, number_type: U64KvNumberIdentifier, **options: Unpack[MutationOptions], - ) -> Self: ... + ) -> MutateResultT: ... 
@overload def max( @@ -1181,7 +1127,7 @@ def max( value: float, number_type: FloatKvNumberIdentifier | None = None, **options: Unpack[MutationOptions], - ) -> Self: ... + ) -> MutateResultT: ... @overload def max( @@ -1193,7 +1139,7 @@ def max( # as float is incompatible with the other number types, but int is # compatible. **options: Unpack[MutationOptions], - ) -> Self: ... + ) -> MutateResultT: ... def max( self, @@ -1203,7 +1149,7 @@ def max( | KvNumberIdentifier | None = None, **options: Unpack[MutationOptions], - ) -> Self: + ) -> MutateResultT: value = cast(NumberT | KvNumberTypeT, value) number_type = cast( KvNumberInfo[KvNumberNameT, NumberT, KvNumberTypeT], number_type @@ -1215,7 +1161,7 @@ def max_bigint( key: AnyKvKey, value: int | JSBigInt, **options: Unpack[MutationOptions], - ) -> Self: + ) -> MutateResultT: return self.max(key, value, number_type=KvNumber.bigint, **options) def max_float( @@ -1223,7 +1169,7 @@ def max_float( key: AnyKvKey, value: float, **options: Unpack[MutationOptions], - ) -> Self: + ) -> MutateResultT: return self.max(key, value, number_type=KvNumber.float, **options) def max_kvu64( @@ -1231,20 +1177,24 @@ def max_kvu64( key: AnyKvKey, value: int | KvU64, **options: Unpack[MutationOptions], - ) -> Self: + ) -> MutateResultT: return self.max(key, value, number_type=KvNumber.u64, **options) - def delete(self, key: AnyKvKey) -> Self: + +class DeleteMutatorMixin(MutatorMixin[MutateResultT]): + def delete(self, key: AnyKvKey) -> MutateResultT: if isinstance(key, Delete): return self.mutate(key) return self.mutate(Delete(key)) - def mutate(self, mutation: MutationRepresentation) -> Self: - self.mutations.append(mutation) - return self + +class EnqueueMixin(Generic[EnqueueResultT]): + @abstractmethod + def _enqueue(self, enqueue: Enqueue, /) -> EnqueueResultT: + raise NotImplementedError @overload - def enqueue(self, enqueue: Enqueue, /) -> Self: ... + def enqueue(self, enqueue: Enqueue, /) -> EnqueueResultT: ... 
@overload def enqueue( @@ -1254,7 +1204,7 @@ def enqueue( delivery_time: datetime | None = None, retry_delays: Backoff | None = None, dead_letter_keys: Sequence[AnyKvKey] | None = None, - ) -> Self: ... + ) -> EnqueueResultT: ... def enqueue( self, @@ -1263,7 +1213,7 @@ def enqueue( delivery_time: datetime | None = None, retry_delays: Backoff | None = None, dead_letter_keys: Sequence[AnyKvKey] | None = None, - ) -> Self: + ) -> EnqueueResultT: if isinstance(message, Enqueue): enqueue = message else: @@ -1273,6 +1223,109 @@ def enqueue( retry_delays=retry_delays, dead_letter_keys=dead_letter_keys, ) + return self._enqueue(enqueue) + + +@dataclass +class PlannedWrite( + CheckMixin["PlannedWrite"], + SetMutatorMixin["PlannedWrite"], + SumMutatorMixin["PlannedWrite"], + MinMutatorMixin["PlannedWrite"], + MaxMutatorMixin["PlannedWrite"], + DeleteMutatorMixin["PlannedWrite"], + EnqueueMixin["PlannedWrite"], + AtomicWriteRepresentationWriter["CompletedWrite"], +): + kv: KvWriter | None = field(default=None) + checks: MutableSequence[CheckRepresentation] = field(default_factory=list) + mutations: MutableSequence[MutationRepresentation] = field(default_factory=list) + enqueues: MutableSequence[EnqueueRepresentation] = field(default_factory=list) + v8_encoder: Encoder | None = field(default=None, kw_only=True) + + @override + async def write( + self, kv: KvWriter | None = None, *, v8_encoder: Encoder | None = None + ) -> CompletedWrite: + _kv = self.kv if kv is None else kv + if _kv is None: + raise TypeError( + f"{type(self).__name__}.write() must get a value for its 'kv' " + "argument when 'self.kv' isn't set" + ) + + _v8_encoder = self.v8_encoder if v8_encoder is None else v8_encoder + if _v8_encoder is None: + _v8_encoder = get_v8_encoder(_kv).value_or(None) + if _v8_encoder is None: + raise TypeError( + f"{type(self).__name__}.write() must get a value for its " + "'v8_encoder' keyword argument when 'self.v8_encoder' isn't " + "set and 'kv' does not provide one." 
+ ) + + (pb_atomic_write,) = self.as_protobuf(v8_encoder=_v8_encoder) + # Copy the write components so that the results are not affected if the + # PlannedWrite is modified during this write. + checks = tuple(self.checks) + mutations = tuple(self.mutations) + enqueues = tuple(self.enqueues) + result = await _kv.write(protobuf_atomic_write=pb_atomic_write) + + if is_err(result): + if isinstance(result.error, CheckFailure): + check_failure = result.error + return ConflictedWrite( + failed_checks=list(check_failure.failed_check_indexes), + checks=checks, + mutations=mutations, + enqueues=enqueues, + endpoint=check_failure.endpoint, + ) + raise result.error + + versionstamp, endpoint = result.value + return CommittedWrite( + versionstamp=versionstamp, + checks=checks, + mutations=mutations, + enqueues=enqueues, + endpoint=endpoint, + ) + + def as_protobuf(self, *, v8_encoder: Encoder) -> tuple[AtomicWrite]: + return ( + AtomicWrite( + checks=[ + pb_msg + for check in self.checks + for pb_msg in check.as_protobuf(v8_encoder=v8_encoder) + ], + mutations=[ + pb_msg + for mut in self.mutations + for pb_msg in mut.as_protobuf(v8_encoder=v8_encoder) + ], + enqueues=[ + pb_msg + for enq in self.enqueues + for pb_msg in enq.as_protobuf(v8_encoder=v8_encoder) + ], + ), + ) + + @override + def _check(self, check: CheckRepresentation, /) -> Self: + self.checks.append(check) + return self + + @override + def mutate(self, mutation: MutationRepresentation) -> Self: + self.mutations.append(mutation) + return self + + @override + def _enqueue(self, enqueue: Enqueue, /) -> Self: self.enqueues.append(enqueue) return self diff --git a/test/test__kv_writes__PlannedWrite.py b/test/test__kv_writes__PlannedWrite.py index 216b295..fa60f09 100644 --- a/test/test__kv_writes__PlannedWrite.py +++ b/test/test__kv_writes__PlannedWrite.py @@ -71,6 +71,7 @@ async def test_as_protobuf( .check(Check(KvKey("check2"), VersionStamp(2))) .check_key_not_set(KvKey("check3")) 
.check_key_has_version(KvKey("check4"), VersionStamp(4)) + .check(KvKey("check5")) .sum(KvKey("sum1"), KvU64(1)) .sum(KvKey("sum2"), 2.0, abort_under=0) .sum(KvKey("sum3"), 0.2, clamp_under=0, clamp_over=1) @@ -100,6 +101,7 @@ async def test_as_protobuf( Check(KvKey("check2"), VersionStamp(2)), Check(KvKey("check3"), None), Check(KvKey("check4"), VersionStamp(4)), + Check(KvKey("check5"), None), ] for pb_msg in check.as_protobuf(v8_encoder=v8_encoder) ], From c85705fce54555d534c977cdba4af6a6377c66a6 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Wed, 29 Jan 2025 04:58:07 +0000 Subject: [PATCH 40/52] feat: allow DenoKvError to have no message argument It used to always require a message string, which made it awkward to subclass for errors that don't want to define a message at init. --- src/denokv/errors.py | 18 ++++++++++-------- test/test_errors.py | 11 +++++++++++ 2 files changed, 21 insertions(+), 8 deletions(-) diff --git a/src/denokv/errors.py b/src/denokv/errors.py index 4326423..db59de0 100644 --- a/src/denokv/errors.py +++ b/src/denokv/errors.py @@ -1,20 +1,22 @@ from dataclasses import dataclass -from denokv._pycompat.typing import cast +from denokv._pycompat.typing import TYPE_CHECKING @dataclass(init=False) class DenoKvError(Exception): - message: str + # Define message for dataclass field metadata only, not type annotation. 
+ if not TYPE_CHECKING: + message: str - def __init__(self, message: str, *args: object) -> None: - super().__init__(message, *args) - if not isinstance(message, str): - raise TypeError(f"first argument must be a str message: {message!r}") + def __init__(self, *args: object) -> None: + super(DenoKvError, self).__init__(*args) - @property # type: ignore[no-redef] + @property def message(self) -> str: - return cast(str, self.args[0]) + if args := self.args: + return str(args[0]) + return type(self).__name__ class DenoKvValidationError(ValueError, DenoKvError): diff --git a/test/test_errors.py b/test/test_errors.py index 031bc42..cf96ff4 100644 --- a/test/test_errors.py +++ b/test/test_errors.py @@ -7,3 +7,14 @@ def test_errors_are_regular_exceptions() -> None: """Errors must be caught by generic Exception handlers — not BaseException.""" with pytest.raises(Exception): # noqa: B017 raise DenoKvError("error") + + +def test_DenoKvError_message() -> None: + assert DenoKvError().message == "DenoKvError" + assert DenoKvError("Foo bar").message == "Foo bar" + + class CustomError(DenoKvError): + pass + + assert CustomError().message == "CustomError" + assert CustomError("Bar baz").message == "Bar baz" From 1517bb049acd4356fdfabaf780d7c7b7e3b099d9 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Mon, 3 Feb 2025 07:24:53 +0000 Subject: [PATCH 41/52] feat: make FailedWrite, ConflictedWrite exceptions Rather than throwing the lower-level errors from datapath, we'll now use one of these two higher-level errors. 
--- src/denokv/_kv_writes.py | 149 +++++++++++++++++------ test/test__kv_writes__CommittedWrite.py | 7 +- test/test__kv_writes__ConflictedWrite.py | 72 +++++++++-- test/test__kv_writes__FailedWrite.py | 116 ++++++++++++++++++ test/test__kv_writes__PlannedWrite.py | 5 +- 5 files changed, 296 insertions(+), 53 deletions(-) create mode 100644 test/test__kv_writes__FailedWrite.py diff --git a/src/denokv/_kv_writes.py b/src/denokv/_kv_writes.py index eabab99..b4206ba 100644 --- a/src/denokv/_kv_writes.py +++ b/src/denokv/_kv_writes.py @@ -35,7 +35,9 @@ from denokv._pycompat.typing import Any from denokv._pycompat.typing import ClassVar from denokv._pycompat.typing import Container +from denokv._pycompat.typing import Final from denokv._pycompat.typing import Generic +from denokv._pycompat.typing import Iterable from denokv._pycompat.typing import Mapping from denokv._pycompat.typing import MutableSequence from denokv._pycompat.typing import Never @@ -60,6 +62,7 @@ from denokv.datapath import AnyKvKey from denokv.datapath import CheckFailure from denokv.datapath import pack_key +from denokv.errors import DenoKvError from denokv.kv_keys import KvKey from denokv.result import AnyFailure from denokv.result import AnySuccess @@ -1330,51 +1333,112 @@ def _enqueue(self, enqueue: Enqueue, /) -> Self: return self -@dataclass(init=False, unsafe_hash=True, **slots_if310()) -class ConflictedWrite(FrozenAfterInitDataclass, AnyFailure): +EMPTY_MAP: Final[Mapping[Any, Any]] = MappingProxyType({}) + + +# TODO: Support capturing retries in the FailedWrite/CommittedWrite? +@dataclass(init=False, unsafe_hash=True) +class FailedWrite(FrozenAfterInitDataclass, AnyFailure, DenoKvError): if TYPE_CHECKING: def _AnyFailure_marker(self, no_call: Never) -> Never: ... 
- ok: Literal[False] - conflicts: Mapping[AnyKvKey, CheckRepresentation] - versionstamp: None - checks: Sequence[CheckRepresentation] - mutations: Sequence[MutationRepresentation] - enqueues: Sequence[EnqueueRepresentation] - endpoint: EndpointInfo + checks: Final[Sequence[CheckRepresentation]] = field() + failed_checks: Final[Sequence[int]] = field() + mutations: Final[Sequence[MutationRepresentation]] = field() + enqueues: Final[Sequence[EnqueueRepresentation]] = field() + endpoint: Final[EndpointInfo] = field() + ok: Final[Literal[False]] = False # noqa: PYI064 + versionstamp: Final[None] = None def __init__( self, - failed_checks: Sequence[int], - checks: Sequence[CheckRepresentation], - mutations: Sequence[MutationRepresentation], - enqueues: Sequence[EnqueueRepresentation], + checks: Iterable[CheckRepresentation], + mutations: Iterable[MutationRepresentation], + enqueues: Iterable[EnqueueRepresentation], endpoint: EndpointInfo, + *, + cause: BaseException | None = None, ) -> None: - self.ok = False - try: - self.conflicts = MappingProxyType( - {checks[i].key: checks[i] for i in failed_checks} - ) - except IndexError as e: - raise ValueError("failed_checks contains out-of-bounds index") from e - self.versionstamp = None - self.checks = tuple(checks) - self.mutations = tuple(mutations) - self.enqueues = tuple(enqueues) - self.endpoint = endpoint + super(FailedWrite, self).__init__() + self.checks = tuple(checks) # type: ignore[misc] # Cannot assign to final + # Allow subclass to initialise failed_checks + if not hasattr(self, "failed_checks"): + self.failed_checks = tuple() # type: ignore[misc] # Cannot assign to final + self.mutations = tuple(mutations) # type: ignore[misc] # Cannot assign to final + self.enqueues = tuple(enqueues) # type: ignore[misc] # Cannot assign to final + self.endpoint = endpoint # type: ignore[misc] # Cannot assign to final + self.__cause__ = cause + + @property + def conflicts(self) -> Mapping[AnyKvKey, CheckRepresentation]: + checks 
= self.checks + return {checks[i].key: checks[i] for i in self.failed_checks} + + def _get_cause_description(self) -> str: + if self.__cause__: + return type(self.__cause__).__name__ + return "unspecified cause" + + @property + def message(self) -> str: + # TODO: after xxx attempts? + return ( + f"to {str(self.endpoint.url)!r} " + f"due to {self._get_cause_description()}, " + f"with {len(self.checks)} checks, " + f"{len(self.mutations)} mutations, " + f"{len(self.enqueues)} enqueues" + ) + + def __str__(self) -> str: + return f"Write failed {self.message}" def __repr__(self) -> str: + return f"<{type(self).__name__} {self.message}>" + + +def _normalise_failed_checks( + failed_checks: Iterable[int], check_count: int +) -> tuple[int, ...]: + failed_checks = tuple(sorted(failed_checks)) + if failed_checks and (failed_checks[0] < 0 or failed_checks[-1] >= check_count): + raise ValueError("failed_checks contains out-of-bounds index") + return failed_checks + + +class ConflictedWrite(FailedWrite): + def __init__( + self, + failed_checks: Iterable[int], + checks: Iterable[CheckRepresentation], + mutations: Iterable[MutationRepresentation], + enqueues: Iterable[EnqueueRepresentation], + endpoint: EndpointInfo, + *, + cause: BaseException | None = None, + ) -> None: + _checks = tuple(checks) + self.failed_checks = _normalise_failed_checks( # type: ignore[misc] # Cannot assign to final attribute "failed_checks" + failed_checks, + check_count=len(_checks), + ) + super(ConflictedWrite, self).__init__( + _checks, mutations, enqueues, endpoint, cause=cause + ) + + @property + def message(self) -> str: return ( - f"<{type(self).__name__} " f"NOT APPLIED to {str(self.endpoint.url)!r} with " f"{len(self.conflicts)}/{len(self.checks)} checks CONFLICTING, " f"{len(self.mutations)} mutations, " f"{len(self.enqueues)} enqueues" - f">" ) + def __str__(self) -> str: + return f"Write {self.message}" + @dataclass(init=False, unsafe_hash=True, **slots_if310()) class 
CommittedWrite(FrozenAfterInitDataclass, AnySuccess): @@ -1382,13 +1446,13 @@ class CommittedWrite(FrozenAfterInitDataclass, AnySuccess): def _AnySuccess_marker(self, no_call: Never) -> Never: ... - ok: Literal[True] - conflicts: Mapping[KvKey, CheckRepresentation] # empty - versionstamp: VersionStamp - checks: Sequence[CheckRepresentation] - mutations: Sequence[MutationRepresentation] - enqueues: Sequence[EnqueueRepresentation] - endpoint: EndpointInfo + ok: Final[Literal[True]] # noqa: PYI064 + conflicts: Final[Mapping[KvKey, CheckRepresentation]] # empty + versionstamp: Final[VersionStamp] + checks: Final[Sequence[CheckRepresentation]] + mutations: Final[Sequence[MutationRepresentation]] + enqueues: Final[Sequence[EnqueueRepresentation]] + endpoint: Final[EndpointInfo] def __init__( self, @@ -1399,23 +1463,28 @@ def __init__( endpoint: EndpointInfo, ) -> None: self.ok = True - self.conflicts = MappingProxyType({}) + self.conflicts = EMPTY_MAP self.versionstamp = versionstamp self.checks = tuple(checks) self.mutations = tuple(mutations) self.enqueues = tuple(enqueues) self.endpoint = endpoint - def __repr__(self) -> str: + @property + def _message(self) -> str: return ( - f"<{type(self).__name__} " f"version 0x{self.versionstamp} to {str(self.endpoint.url)!r} with " f"{len(self.checks)} checks, " f"{len(self.mutations)} mutations, " f"{len(self.enqueues)} enqueues" - f">" ) + def __str__(self) -> str: + return f"Write committed {self._message}" + + def __repr__(self) -> str: + return f"<{type(self).__name__} {self._message}>" + CompletedWrite: TypeAlias = Union[CommittedWrite, ConflictedWrite] @@ -2078,4 +2147,6 @@ def _evaluate_backoff_schedule(self) -> Sequence[int]: return [int(delay * 1000) for delay in delay_seconds] -WriteOperation: TypeAlias = Union[Check, Set, Sum, Min, Max, Delete, Enqueue] +WriteOperation: TypeAlias = Union[ + CheckRepresentation, MutationRepresentation, EnqueueRepresentation +] diff --git a/test/test__kv_writes__CommittedWrite.py 
b/test/test__kv_writes__CommittedWrite.py index c6b6ff2..bc08c42 100644 --- a/test/test__kv_writes__CommittedWrite.py +++ b/test/test__kv_writes__CommittedWrite.py @@ -61,7 +61,10 @@ def test_str_repr() -> None: endpoint=EP, ) assert ( - str(instance) == "" ) - assert str(instance) == repr(instance) diff --git a/test/test__kv_writes__ConflictedWrite.py b/test/test__kv_writes__ConflictedWrite.py index 39667f6..4c0d5dd 100644 --- a/test/test__kv_writes__ConflictedWrite.py +++ b/test/test__kv_writes__ConflictedWrite.py @@ -1,14 +1,17 @@ +import traceback from datetime import datetime import pytest from yarl import URL +from denokv import _datapath_pb2 as datapath_pb2 from denokv._kv_writes import Check from denokv._kv_writes import ConflictedWrite from denokv._kv_writes import Enqueue from denokv._kv_writes import Set from denokv.auth import ConsistencyLevel from denokv.auth import EndpointInfo +from denokv.datapath import CheckFailure from denokv.kv_keys import KvKey from denokv.result import is_err @@ -18,17 +21,32 @@ @pytest.fixture def instance() -> ConflictedWrite: + pb_checks = [ + datapath_pb2.Check(key=bytes(KvKey("a")), versionstamp=None), + datapath_pb2.Check(key=bytes(KvKey("b")), versionstamp=None), + datapath_pb2.Check(key=bytes(KvKey("c")), versionstamp=None), + ] checks = [ Check.for_key_not_set(KvKey("a")), Check.for_key_not_set(KvKey("b")), Check.for_key_not_set(KvKey("c")), ] + failed_checks = [0, 2] + + cause = CheckFailure( + "Not all checks required by the Atomic Write passed", + all_checks=pb_checks, + failed_check_indexes=failed_checks, + endpoint=EP, + ) + return ConflictedWrite( - failed_checks=[0, 2], - checks=list(checks), + failed_checks=failed_checks, + checks=checks, mutations=[Set(KvKey("a"), 42)], enqueues=[Enqueue("Hi")], endpoint=EP, + cause=cause, ) @@ -53,13 +71,9 @@ def test_constructor(instance: ConflictedWrite) -> None: assert instance.enqueues == (Enqueue("Hi"),) assert instance.endpoint is EP - assert dict(instance.conflicts) 
== {KvKey("a"): checks[0], KvKey("c"): checks[2]} + assert instance.conflicts == {KvKey("a"): checks[0], KvKey("c"): checks[2]} assert instance.conflicts[KvKey("a")] is checks[0] - # conflicts is immutable - with pytest.raises(TypeError): - del instance.conflicts[KvKey("a")] # type: ignore[attr-defined] - with pytest.raises(ValueError, match=r"failed_checks contains out-of-bounds index"): ConflictedWrite( failed_checks=[0, 10], @@ -70,13 +84,51 @@ def test_constructor(instance: ConflictedWrite) -> None: ) +def test_changes_to_conflicts_do_not_persist(instance: ConflictedWrite) -> None: + assert isinstance(instance.conflicts, dict) + # Changes to conflicts do not persist + assert KvKey("a") in instance.conflicts + del instance.conflicts[KvKey("a")] + assert KvKey("a") in instance.conflicts + + def test_is_AnyFailure(instance: ConflictedWrite) -> None: assert is_err(instance) -def test_str_repr(instance: ConflictedWrite) -> None: +@pytest.mark.parametrize("with_cause", [True, False]) +def test_str(instance: ConflictedWrite, with_cause: bool) -> None: + assert instance.__cause__ + if not with_cause: + instance.__cause__ = None assert ( - str(instance) == " None: + assert instance.__cause__ + if not with_cause: + instance.__cause__ = None + + assert ( + repr(instance) == "" ) - assert str(instance) == repr(instance) + + +@pytest.mark.parametrize("with_cause", [True, False]) +def test_traceback_presentation(instance: ConflictedWrite, with_cause: bool) -> None: + assert instance.__cause__ + if not with_cause: + instance.__cause__ = None + + assert "\n".join( + traceback.format_exception_only(type(instance), instance) + ).strip() == ( + "denokv._kv_writes.ConflictedWrite: " + "Write NOT APPLIED to 'https://example.com/' " + "with 2/3 checks CONFLICTING, 1 mutations, 1 enqueues" + ) diff --git a/test/test__kv_writes__FailedWrite.py b/test/test__kv_writes__FailedWrite.py new file mode 100644 index 0000000..799fe3b --- /dev/null +++ b/test/test__kv_writes__FailedWrite.py @@ 
-0,0 +1,116 @@ +from __future__ import annotations + +import traceback +from datetime import datetime + +import pytest +from yarl import URL + +from denokv._kv_writes import Check +from denokv._kv_writes import Enqueue +from denokv._kv_writes import FailedWrite +from denokv._kv_writes import Set +from denokv.auth import ConsistencyLevel +from denokv.auth import EndpointInfo +from denokv.datapath import ProtocolViolation +from denokv.kv_keys import KvKey +from denokv.result import is_err + +T1 = datetime.fromisoformat("2000-01-02T03:04:05.6Z") +EP = EndpointInfo(URL("https://example.com/"), consistency=ConsistencyLevel.STRONG) + + +@pytest.fixture +def instance() -> FailedWrite: + checks = [ + Check.for_key_not_set(KvKey("a")), + Check.for_key_not_set(KvKey("b")), + Check.for_key_not_set(KvKey("c")), + ] + return FailedWrite( + checks=list(checks), + mutations=[Set(KvKey("a"), 42)], + enqueues=[Enqueue("Hi")], + endpoint=EP, + cause=ProtocolViolation("Server misbehaved", data=None, endpoint=EP), + ) + + +@pytest.mark.parametrize( + "cause", [None, ProtocolViolation("Server misbehaved", data=None, endpoint=EP)] +) +def test_constructor(cause: BaseException | None) -> None: + checks = [ + Check.for_key_not_set(KvKey("a")), + Check.for_key_not_set(KvKey("b")), + Check.for_key_not_set(KvKey("c")), + ] + instance = FailedWrite( + checks=list(checks), + mutations=[Set(KvKey("a"), 42)], + enqueues=[Enqueue("Hi")], + endpoint=EP, + cause=cause, + ) + + assert not instance.ok + assert instance.versionstamp is None + assert instance.checks == tuple(checks) + assert instance.mutations == (Set(KvKey("a"), 42),) + assert instance.enqueues == (Enqueue("Hi"),) + assert instance.endpoint is EP + assert instance.__cause__ is cause + + assert instance.conflicts == {} + + +def test_exception_attributes(instance: FailedWrite) -> None: + assert instance.args == () + + +def test_changes_to_conflicts_do_not_persist(instance: FailedWrite) -> None: + conflicts = instance.conflicts + assert 
isinstance(conflicts, dict) + # Changes to conflicts do not persist + conflicts[KvKey("a")] = instance.checks[0] + assert instance.conflicts == {} + + +def test_is_AnyFailure(instance: FailedWrite) -> None: + assert is_err(instance) + + +def test_str(instance: FailedWrite) -> None: + assert ( + str(instance) == "Write failed to 'https://example.com/' " + "due to ProtocolViolation, with 3 checks, 1 mutations, 1 enqueues" + ) + + instance.__cause__ = None + assert ( + str(instance) == "Write failed to 'https://example.com/' " + "due to unspecified cause, with 3 checks, 1 mutations, 1 enqueues" + ) + + +def test_repr(instance: FailedWrite) -> None: + assert ( + repr(instance) == "" + ) + + instance.__cause__ = None + assert ( + repr(instance) == "" + ) + + +def test_traceback_presentation(instance: FailedWrite) -> None: + assert "\n".join( + traceback.format_exception_only(type(instance), instance) + ).strip() == ( + "denokv._kv_writes.FailedWrite: Write failed " + "to 'https://example.com/' " + "due to ProtocolViolation, with 3 checks, 1 mutations, 1 enqueues" + ) diff --git a/test/test__kv_writes__PlannedWrite.py b/test/test__kv_writes__PlannedWrite.py index fa60f09..5d5005f 100644 --- a/test/test__kv_writes__PlannedWrite.py +++ b/test/test__kv_writes__PlannedWrite.py @@ -20,6 +20,7 @@ from denokv._kv_writes import ConflictedWrite from denokv._kv_writes import Delete from denokv._kv_writes import Enqueue +from denokv._kv_writes import FailedWrite from denokv._kv_writes import Limit from denokv._kv_writes import LimitExceededPolicy from denokv._kv_writes import Max @@ -234,10 +235,10 @@ async def test_write__handles_write_request_failure( ) mocked(writer.write).return_value = failed_write - with pytest.raises(ResponseUnsuccessful) as exc_info: + with pytest.raises(FailedWrite) as exc_info: await planned_write.write(kv=writer, v8_encoder=v8_encoder) - assert exc_info.value == error + assert exc_info.value.__cause__ == error @pytest.mark.asyncio() From 
d13ba06fd98748d40eb64887b88f72eabb6315d5 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Wed, 29 Jan 2025 06:28:42 +0000 Subject: [PATCH 42/52] chore: enable mypy possibly-undefined check Checks for unbound variable use. --- pyproject.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index b6a8ace..942641f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -57,6 +57,9 @@ extra_standard_library = ["typing_extensions"] [tool.mypy] strict = true +enable_error_code = [ + 'possibly-undefined' +] mypy_path = "./stubs" [[tool.mypy.overrides]] From a8694de76a7c49343108cda0f89062c8043c99ff Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Wed, 29 Jan 2025 06:29:18 +0000 Subject: [PATCH 43/52] chore: clarify ambiguously-defined variable --- test/denokv_testing.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/denokv_testing.py b/test/denokv_testing.py index 7954aa5..18d9df5 100644 --- a/test/denokv_testing.py +++ b/test/denokv_testing.py @@ -368,10 +368,12 @@ def atomic_write( key_tuple = unpack(mut.key) key_bytes = pack(unpack(mut.key)) except Exception as e: + key_tuple = None key_bytes = None cause = e if key_bytes != mut.key: raise ValueError(f"Mutation key is not valid: {mut.key!r}") from cause + assert key_tuple is not None expires_at_ms = mut.expire_at_ms if expires_at_ms < 0: From b20372e4ab703037a547bc4871b09eeca969535d Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Fri, 31 Jan 2025 03:39:48 +0000 Subject: [PATCH 44/52] fix: allow CheckFailure with no failed indexes The denokv self-hosted implementation does not return indexes of failed checks, so we need to allow this. The spec does not say that servers MUST report indexes of failed checks, only that clients SHOULD report failed indexes. 
See: https://github.com/denoland/denokv/issues/110 --- src/denokv/datapath.py | 29 ++++++++++++---------------- test/test_datapath.py | 43 +++++++++++++++++++++++++++++++----------- 2 files changed, 44 insertions(+), 28 deletions(-) diff --git a/src/denokv/datapath.py b/src/denokv/datapath.py index a371b85..677385f 100644 --- a/src/denokv/datapath.py +++ b/src/denokv/datapath.py @@ -202,18 +202,19 @@ class CheckFailure(DataPathDenoKvError): all_checks: tuple[Check, ...] """All of the Checks sent with the AtomicWrite.""" - failed_check_indexes: AbstractSet[int] + failed_check_indexes: AbstractSet[int] | None """ The indexes of Checks in all_checks keys whose versionstamp check failed. - The set is sorted with ascending iteration order. + The set is sorted with ascending iteration order. Will be None if the + database does not support reporting which checks failed. """ def __init__( self, message: str, all_checks: Iterable[Check], - failed_check_indexes: Iterable[int], + failed_check_indexes: Iterable[int] | None, *args: object, endpoint: EndpointInfo, ) -> None: @@ -222,12 +223,15 @@ def __init__( self.all_checks = tuple(all_checks) if len(self.all_checks) == 0: raise ValueError("all_checks is empty") - ordered_indexes = sorted(failed_check_indexes) - if len(ordered_indexes) == 0: - raise ValueError("failed_check_indexes is empty") - if ordered_indexes[0] < 0 or ordered_indexes[-1] >= len(self.all_checks): + + ordered_indexes = sorted(failed_check_indexes) if failed_check_indexes else [] + if len(ordered_indexes) > 0 and ( + ordered_indexes[0] < 0 or ordered_indexes[-1] >= len(self.all_checks) + ): raise IndexError("failed_check_indexes contains out-of-bounds index") - self.failed_check_indexes = {i: True for i in ordered_indexes}.keys() + self.failed_check_indexes = ( + {i: True for i in ordered_indexes}.keys() if ordered_indexes else None + ) DataPathError: TypeAlias = Union[ @@ -539,15 +543,6 @@ async def atomic_write( ) err.__cause__ = e return Err(err) - except 
ValueError as e: - err = ProtocolViolation( - "Server responded to Data Path Atomic Write with " - "CHECK_FAILURE containing no failed checks", - data=write_output, - endpoint=endpoint, - ) - err.__cause__ = e - return Err(err) elif write_output.status == AtomicWriteStatus.AW_WRITE_DISABLED: return Err( EndpointNotUsable( diff --git a/test/test_datapath.py b/test/test_datapath.py index b541d61..ceb71f8 100644 --- a/test/test_datapath.py +++ b/test/test_datapath.py @@ -41,6 +41,7 @@ from denokv._kv_values import VersionStamp from denokv._pycompat.typing import Awaitable from denokv._pycompat.typing import Callable +from denokv._pycompat.typing import Iterable from denokv._pycompat.typing import Mapping from denokv._pycompat.typing import Sequence from denokv._pycompat.typing import TypeAlias @@ -261,7 +262,10 @@ async def violation_atomic_write_check_failure_with_out_of_bounds_index( ).SerializeToString(), ) - async def violation_atomic_write_check_failure_without_failed_checks( + # The denokv self-hosted implementation does not return indexes of failed + # checks. 
+ # https://github.com/denoland/denokv/issues/110 + async def quirk_atomic_write_check_failure_without_failed_checks( request: web.Request, ) -> web.Response: write = AtomicWrite() @@ -363,7 +367,7 @@ def add_datapath_post(app: web.Application, path: str, handler: Handler) -> None ) app.router.add_post( "/check_failure_without_failed_checks/atomic_write", - violation_atomic_write_check_failure_without_failed_checks, + quirk_atomic_write_check_failure_without_failed_checks, ) app.router.add_post("/unusable/atomic_write", unusable_atomic_write) app.router.add_post( @@ -781,10 +785,12 @@ async def test_atomic_write__raises_when_given_endpoint_without_strong_consisten ), ( "/check_failure_without_failed_checks", - lambda endpoint: ProtocolViolation( - "Server responded to Data Path Atomic Write with CHECK_FAILURE " - "containing no failed checks", - data=AtomicWriteOutput(status=AtomicWriteStatus.AW_CHECK_FAILURE), + lambda endpoint: CheckFailure( + "Not all checks required by the Atomic Write passed", + all_checks=[ + Check(key=pack_key(("x",)), versionstamp=bytes(VersionStamp(0))) + ], + failed_check_indexes=[], endpoint=endpoint, ), ), @@ -1385,6 +1391,26 @@ def test_CheckFailure(example_endpoint: EndpointInfo) -> None: assert msg in str(e) +@pytest.mark.parametrize("failed_check_indexes", [None, ()]) +def test_CheckFailure__failed_check_indexes_is_None_when_no_indexes( + failed_check_indexes: Iterable[int] | None, example_endpoint: EndpointInfo +) -> None: + checks = [ + Check(key=bytes(KvKey(f"a{i}")), versionstamp=bytes(VersionStamp(i))) + for i in range(4) + ] + # Failed_check_indexes can be empty (the self-hosted sqlite implementation + # does not return the indexes of failed checks). 
+ e = CheckFailure( + "Foo", + all_checks=iter(checks), + failed_check_indexes=failed_check_indexes, + endpoint=example_endpoint, + ) + assert e.all_checks == tuple(checks) + assert e.failed_check_indexes is None + + def test_CheckFailure__validates_constructor_args( example_endpoint: EndpointInfo, ) -> None: @@ -1395,11 +1421,6 @@ def test_CheckFailure__validates_constructor_args( "Foo", all_checks=[], failed_check_indexes=[], endpoint=example_endpoint ) - with pytest.raises(ValueError, match=r"failed_check_indexes is empty"): - CheckFailure( - "Foo", all_checks=checks, failed_check_indexes=[], endpoint=example_endpoint - ) - with pytest.raises( IndexError, match=r"failed_check_indexes contains out-of-bounds index" ): From 39ee06276cf7868e35f1693e9a5f71b070d1fc46 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Tue, 4 Feb 2025 04:03:28 +0000 Subject: [PATCH 45/52] fix: support unknown conflicts in write result types CommittedWrite, ConflictedWrite and FailedWrite now all have a has_unknown_conflicts property to indicate whether the conflicts/failed_checks is populated with the specific checks that failed. This is necessary because databases don't have to report which checks failed. 
--- src/denokv/_kv_writes.py | 35 +++++++++--- test/test__kv_writes__CommittedWrite.py | 3 + test/test__kv_writes__ConflictedWrite.py | 70 +++++++++++++++++++----- test/test__kv_writes__FailedWrite.py | 1 + test/test__kv_writes__PlannedWrite.py | 2 +- 5 files changed, 89 insertions(+), 22 deletions(-) diff --git a/src/denokv/_kv_writes.py b/src/denokv/_kv_writes.py index b4206ba..67f3ad3 100644 --- a/src/denokv/_kv_writes.py +++ b/src/denokv/_kv_writes.py @@ -1279,13 +1279,19 @@ async def write( if isinstance(result.error, CheckFailure): check_failure = result.error return ConflictedWrite( - failed_checks=list(check_failure.failed_check_indexes), + failed_checks=check_failure.failed_check_indexes, checks=checks, mutations=mutations, enqueues=enqueues, endpoint=check_failure.endpoint, + cause=check_failure, ) - raise result.error + raise FailedWrite( + checks=checks, + mutations=mutations, + enqueues=enqueues, + endpoint=result.error.endpoint, + ) from result.error versionstamp, endpoint = result.value return CommittedWrite( @@ -1345,6 +1351,13 @@ def _AnyFailure_marker(self, no_call: Never) -> Never: ... checks: Final[Sequence[CheckRepresentation]] = field() failed_checks: Final[Sequence[int]] = field() + has_unknown_conflicts: Final[bool] = field() + """ + Whether the check(s) that failed are unknown. + + KV servers may or may not report which check(s) failed when a write + fails due to a check conflict. 
+ """ mutations: Final[Sequence[MutationRepresentation]] = field() enqueues: Final[Sequence[EnqueueRepresentation]] = field() endpoint: Final[EndpointInfo] = field() @@ -1365,6 +1378,7 @@ def __init__( # Allow subclass to initialise failed_checks if not hasattr(self, "failed_checks"): self.failed_checks = tuple() # type: ignore[misc] # Cannot assign to final + self.has_unknown_conflicts = False # type: ignore[misc] # Cannot assign to final self.mutations = tuple(mutations) # type: ignore[misc] # Cannot assign to final self.enqueues = tuple(enqueues) # type: ignore[misc] # Cannot assign to final self.endpoint = endpoint # type: ignore[misc] # Cannot assign to final @@ -1399,10 +1413,14 @@ def __repr__(self) -> str: def _normalise_failed_checks( - failed_checks: Iterable[int], check_count: int + failed_checks: Iterable[int], checks: tuple[CheckRepresentation, ...] ) -> tuple[int, ...]: failed_checks = tuple(sorted(failed_checks)) - if failed_checks and (failed_checks[0] < 0 or failed_checks[-1] >= check_count): + # If the server didn't report failed checks and there was only one check, we + # know the single check must have failed, so report that. 
+ if len(failed_checks) == 0 and len(checks) == 1: + return (0,) + if failed_checks and (failed_checks[0] < 0 or failed_checks[-1] >= len(checks)): raise ValueError("failed_checks contains out-of-bounds index") return failed_checks @@ -1410,7 +1428,7 @@ def _normalise_failed_checks( class ConflictedWrite(FailedWrite): def __init__( self, - failed_checks: Iterable[int], + failed_checks: Iterable[int] | None, checks: Iterable[CheckRepresentation], mutations: Iterable[MutationRepresentation], enqueues: Iterable[EnqueueRepresentation], @@ -1420,9 +1438,10 @@ def __init__( ) -> None: _checks = tuple(checks) self.failed_checks = _normalise_failed_checks( # type: ignore[misc] # Cannot assign to final attribute "failed_checks" - failed_checks, - check_count=len(_checks), + failed_checks or [], + checks=_checks, ) + self.has_unknown_conflicts = len(self.failed_checks) == 0 # type: ignore[misc] # Cannot assign to final attribute super(ConflictedWrite, self).__init__( _checks, mutations, enqueues, endpoint, cause=cause ) @@ -1448,6 +1467,7 @@ def _AnySuccess_marker(self, no_call: Never) -> Never: ... 
ok: Final[Literal[True]] # noqa: PYI064 conflicts: Final[Mapping[KvKey, CheckRepresentation]] # empty + has_unknown_conflicts: Final[Literal[False]] versionstamp: Final[VersionStamp] checks: Final[Sequence[CheckRepresentation]] mutations: Final[Sequence[MutationRepresentation]] @@ -1464,6 +1484,7 @@ def __init__( ) -> None: self.ok = True self.conflicts = EMPTY_MAP + self.has_unknown_conflicts = False self.versionstamp = versionstamp self.checks = tuple(checks) self.mutations = tuple(mutations) diff --git a/test/test__kv_writes__CommittedWrite.py b/test/test__kv_writes__CommittedWrite.py index bc08c42..a9b26ad 100644 --- a/test/test__kv_writes__CommittedWrite.py +++ b/test/test__kv_writes__CommittedWrite.py @@ -51,6 +51,9 @@ def test_constructors() -> None: assert instance.enqueues == (Enqueue("Hi"),) assert instance.endpoint is EP + assert instance.conflicts == {} + assert not instance.has_unknown_conflicts + def test_str_repr() -> None: instance = CommittedWrite( diff --git a/test/test__kv_writes__ConflictedWrite.py b/test/test__kv_writes__ConflictedWrite.py index 4c0d5dd..c8ae35c 100644 --- a/test/test__kv_writes__ConflictedWrite.py +++ b/test/test__kv_writes__ConflictedWrite.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import traceback from datetime import datetime @@ -9,6 +11,9 @@ from denokv._kv_writes import ConflictedWrite from denokv._kv_writes import Enqueue from denokv._kv_writes import Set +from denokv._pycompat.typing import Iterable +from denokv._pycompat.typing import Sequence +from denokv._pycompat.typing import cast from denokv.auth import ConsistencyLevel from denokv.auth import EndpointInfo from denokv.datapath import CheckFailure @@ -20,17 +25,21 @@ @pytest.fixture -def instance() -> ConflictedWrite: +def checks() -> tuple[Check, Check, Check]: + return ( + Check.for_key_not_set(KvKey("a")), + Check.for_key_not_set(KvKey("b")), + Check.for_key_not_set(KvKey("c")), + ) + + +@pytest.fixture +def instance(checks: Iterable[Check]) -> 
ConflictedWrite: pb_checks = [ datapath_pb2.Check(key=bytes(KvKey("a")), versionstamp=None), datapath_pb2.Check(key=bytes(KvKey("b")), versionstamp=None), datapath_pb2.Check(key=bytes(KvKey("c")), versionstamp=None), ] - checks = [ - Check.for_key_not_set(KvKey("a")), - Check.for_key_not_set(KvKey("b")), - Check.for_key_not_set(KvKey("c")), - ] failed_checks = [0, 2] cause = CheckFailure( @@ -50,15 +59,10 @@ def instance() -> ConflictedWrite: ) -def test_constructor(instance: ConflictedWrite) -> None: - checks = [ - Check.for_key_not_set(KvKey("a")), - Check.for_key_not_set(KvKey("b")), - Check.for_key_not_set(KvKey("c")), - ] +def test_constructor(checks: Sequence[Check]) -> None: instance = ConflictedWrite( failed_checks=[0, 2], - checks=list(checks), + checks=cast(Iterable[Check], checks), mutations=[Set(KvKey("a"), 42)], enqueues=[Enqueue("Hi")], endpoint=EP, @@ -74,10 +78,48 @@ def test_constructor(instance: ConflictedWrite) -> None: assert instance.conflicts == {KvKey("a"): checks[0], KvKey("c"): checks[2]} assert instance.conflicts[KvKey("a")] is checks[0] + +@pytest.mark.parametrize("failed_checks", [None, [], [0]]) +def test_constructor__conflicts_are_always_known_with_single_check( + failed_checks: Iterable[int] | None, +) -> None: + instance = ConflictedWrite( + failed_checks=failed_checks, + checks=iter([Check.for_key_not_set(KvKey("a"))]), + mutations=[Set(KvKey("a"), 42)], + enqueues=[Enqueue("Hi")], + endpoint=EP, + ) + + assert KvKey("a") in instance.conflicts + assert instance.conflicts[KvKey("a")].key == KvKey("a") + assert not instance.has_unknown_conflicts + + +@pytest.mark.parametrize("failed_checks", [None, []]) +def test_constructor__conflicts_are_unknown_with_multiple_checks_without_failed_checks( + failed_checks: Iterable[int] | None, checks: Iterable[Check] +) -> None: + instance = ConflictedWrite( + failed_checks=failed_checks, + checks=checks, + mutations=[Set(KvKey("a"), 42)], + enqueues=[Enqueue("Hi")], + endpoint=EP, + ) + + assert 
len(instance.conflicts) == 0 + assert instance.has_unknown_conflicts + + +def test_constructor__rejects_out_of_bounds_failed_checks( + checks: tuple[Check, Check, Check], +) -> None: + assert len(checks) == 3 with pytest.raises(ValueError, match=r"failed_checks contains out-of-bounds index"): ConflictedWrite( failed_checks=[0, 10], - checks=list(checks), + checks=checks, mutations=[Set(KvKey("a"), 42)], enqueues=[], endpoint=EP, diff --git a/test/test__kv_writes__FailedWrite.py b/test/test__kv_writes__FailedWrite.py index 799fe3b..59f3d94 100644 --- a/test/test__kv_writes__FailedWrite.py +++ b/test/test__kv_writes__FailedWrite.py @@ -62,6 +62,7 @@ def test_constructor(cause: BaseException | None) -> None: assert instance.__cause__ is cause assert instance.conflicts == {} + assert not instance.has_unknown_conflicts def test_exception_attributes(instance: FailedWrite) -> None: diff --git a/test/test__kv_writes__PlannedWrite.py b/test/test__kv_writes__PlannedWrite.py index 5d5005f..5e164ba 100644 --- a/test/test__kv_writes__PlannedWrite.py +++ b/test/test__kv_writes__PlannedWrite.py @@ -211,7 +211,7 @@ async def test_write__handles_unsuccessful_conflicted_write( result = await planned_write.write(kv=writer, v8_encoder=v8_encoder) assert result == ConflictedWrite( - failed_checks=list(error.failed_check_indexes), + failed_checks=error.failed_check_indexes, checks=planned_write.checks, mutations=planned_write.mutations, enqueues=planned_write.enqueues, From 5a09e3f6a357be49d607dec1cd16857b3ba3c33b Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Sat, 1 Feb 2025 02:15:17 +0000 Subject: [PATCH 46/52] feat: provide mutation shorthand methods on Kv Kv now has the check(), set(), sum(), min(), max(), delete() and enqueue() methods of PlannedWrite, using the same mixins that define these methods on PlannedWrite. On Kv they execute immediately instead of accumulating operations to execute later. 
--- src/denokv/kv.py | 45 ++++++++++++- test/test_kv.py | 160 ++++++++++++++++++++++++++++++++++++++++------- 2 files changed, 180 insertions(+), 25 deletions(-) diff --git a/src/denokv/kv.py b/src/denokv/kv.py index a6993c3..156fdb5 100644 --- a/src/denokv/kv.py +++ b/src/denokv/kv.py @@ -36,10 +36,19 @@ from denokv._kv_values import KvU64 from denokv._kv_values import VersionStamp from denokv._kv_writes import Check +from denokv._kv_writes import CheckMixin +from denokv._kv_writes import CheckRepresentation from denokv._kv_writes import CompletedWrite +from denokv._kv_writes import DeleteMutatorMixin from denokv._kv_writes import Enqueue +from denokv._kv_writes import EnqueueMixin +from denokv._kv_writes import MaxMutatorMixin +from denokv._kv_writes import MinMutatorMixin from denokv._kv_writes import Mutation +from denokv._kv_writes import MutationRepresentation from denokv._kv_writes import PlannedWrite +from denokv._kv_writes import SetMutatorMixin +from denokv._kv_writes import SumMutatorMixin from denokv._kv_writes import WriteOperation from denokv._pycompat.dataclasses import slots_if310 from denokv._pycompat.typing import Any @@ -88,6 +97,7 @@ from denokv.result import Err from denokv.result import Ok from denokv.result import Result +from denokv.result import is_ok T = TypeVar("T", default=object) # Note that the default arg doesn't seem to work with MyPy yet. The @@ -369,7 +379,17 @@ class KvFlags(Flag): @dataclass(init=False) -class Kv(KvWriter, AbstractAsyncContextManager["Kv", None]): +class Kv( + CheckMixin[Awaitable[bool]], + SetMutatorMixin[Awaitable[VersionStamp]], + SumMutatorMixin[Awaitable[VersionStamp]], + MinMutatorMixin[Awaitable[VersionStamp]], + MaxMutatorMixin[Awaitable[VersionStamp]], + DeleteMutatorMixin[Awaitable[VersionStamp]], + EnqueueMixin[Awaitable[VersionStamp]], + KvWriter, + AbstractAsyncContextManager["Kv", None], +): """ Interface to perform requests against a Deno KV database. 
@@ -848,6 +868,29 @@ async def write( return await planned_write.write(kv=self, v8_encoder=self.v8_encoder) + @override + async def _check(self, check: CheckRepresentation, /) -> bool: + return is_ok(await self.write(check)) + + @override + async def mutate(self, mutation: MutationRepresentation) -> VersionStamp: + result = await self.write(mutation) + if is_ok(result): + return result.versionstamp + # This is a write conflict which we don't expect to occur, because the + # shortcut mutation methods (like set(), sum(), etc) don't include + # checks. + raise result + + @override + async def _enqueue(self, enqueue: Enqueue, /) -> VersionStamp: + result = await self.write(enqueue) + if is_ok(result): + return result.versionstamp + # This is a write conflict which we don't expect to occur, because the + # enqueue() shortcut doesn't include checks. + raise result + _KvSnapshotReadResult: TypeAlias = Result[ tuple[SnapshotReadOutput, EndpointInfo], DataPathError diff --git a/test/test_kv.py b/test/test_kv.py index 3406fbe..83b279c 100644 --- a/test/test_kv.py +++ b/test/test_kv.py @@ -44,6 +44,8 @@ from denokv._kv_values import VersionStamp from denokv._kv_writes import DEFAULT_ENQUEUE_RETRY_DELAY_COUNT from denokv._kv_writes import LIMIT_KVU64 +from denokv._kv_writes import Check +from denokv._kv_writes import FailedWrite from denokv._kv_writes import Limit from denokv._kv_writes import SumArgs from denokv._pycompat.enum import StrEnum @@ -977,15 +979,16 @@ async def test_Kv_write__set_versioned(kv: Kv) -> None: assert entry and entry.value == "Hi" -ErrorPredicate: TypeAlias = Callable[[Exception], bool] +ErrorPredicate: TypeAlias = Callable[[BaseException | None], bool] -def match_client_error(server_msg_content: str) -> Callable[[Exception], bool]: - def is_client_error(e: Exception) -> bool: +def match_client_error(server_msg_content: str) -> ErrorPredicate: + def is_client_error(e: BaseException | None) -> bool: return ( - isinstance(e, ResponseUnsuccessful) - and 
e.status == 400 - and server_msg_content in e.body_text + isinstance(e, FailedWrite) + and isinstance(e.__cause__, ResponseUnsuccessful) + and e.__cause__.status == 400 + and server_msg_content in e.__cause__.body_text ) return is_client_error @@ -995,6 +998,7 @@ def match_error( kind: type[BaseException], containing: str | None = None, matching: str | re.Pattern[str] | None = None, + cause: ErrorPredicate | None = None, ) -> ErrorPredicate: if containing is not None: if matching is not None: @@ -1003,12 +1007,28 @@ def match_error( elif matching is None: raise ValueError("containing or matching args must be set") - def is_error(e: Exception) -> bool: - return isinstance(e, kind) and bool(re.search(matching, str(e))) + def is_error(e: BaseException | None) -> bool: + return ( + isinstance(e, kind) + and bool(re.search(matching, str(e))) + and (cause is None or cause(e.__cause__)) + ) return is_error +def match_write_failure( + kind: type[BaseException], + containing: str | None = None, + matching: str | re.Pattern[str] | None = None, +) -> ErrorPredicate: + return match_error( + kind=FailedWrite, + containing="", + cause=match_error(kind, containing=containing, matching=matching), + ) + + @asynccontextmanager async def validate_write_outcome( kv: Kv, @@ -1019,6 +1039,9 @@ async def validate_write_outcome( if initial_val is not None: assert is_ok(await kv.atomic().set(("foo", 0), initial_val).write()) + else: + assert is_ok(await kv.atomic().delete(("foo", 0)).write()) + try: yield (kv, ("foo", 0)) assert not match_error, "write succeeded but is expected to fail" @@ -1083,7 +1106,7 @@ async def validate_write_outcome( # fmt: on @_params_test_Kv_write__sum @pytest_mark_asyncio -async def test_Kv_write__sum( +async def test_Kv_write__atomic_sum( kv: Kv, initial_val: int | float | JSBigInt | KvU64 | None, sum_val: int | float | JSBigInt | KvU64, @@ -1095,7 +1118,21 @@ async def test_Kv_write__sum( assert is_ok(await kv.atomic().sum(**sum_args).write()) # type: 
ignore[arg-type] -@pytest.mark.parametrize( +@_params_test_Kv_write__sum +@pytest_mark_asyncio +async def test_Kv_write__sum( + kv: Kv, + initial_val: int | float | JSBigInt | KvU64 | None, + sum_val: int | float | JSBigInt | KvU64, + sum_kwargs: SumArgs[Any, Any, Any], + result: int | float | KvU64 | Callable[[Exception], bool], +) -> None: + async with validate_write_outcome(kv, initial_val, result) as (kv, key): + sum_args = SumArgs(key=key, delta=sum_val, **sum_kwargs) + assert isinstance(await kv.sum(**sum_args), VersionStamp) # type: ignore[arg-type] + + +_params_test_Kv_write__max = pytest.mark.parametrize( "initial_val, max_val, max_kwargs, result", [ (JSBigInt(12), JSBigInt(3), {}, JSBigInt(12)), @@ -1111,7 +1148,7 @@ async def test_Kv_write__sum( # The errors reference M_SUM because bigint/number implement min/max # using clamped M_SUM operations, not the actual M_MIN/M_MAX, # because they only support u64. - match_error( + match_write_failure( ResponseUnsuccessful, "SnapshotWrite is not valid: " "Cannot apply operation M_SUM, number types are incompatible: " @@ -1123,7 +1160,7 @@ async def test_Kv_write__sum( 1.5, JSBigInt(2), {}, - match_error( + match_write_failure( ResponseUnsuccessful, "SnapshotWrite is not valid: " "Cannot apply operation M_SUM, number types are incompatible: " @@ -1135,7 +1172,7 @@ async def test_Kv_write__sum( KvU64(1), 2.0, {}, - match_error( + match_write_failure( ResponseUnsuccessful, "SnapshotWrite is not valid: " "Cannot apply operation M_SUM, number types are incompatible: " @@ -1147,7 +1184,7 @@ async def test_Kv_write__sum( 2.0, KvU64(1), {}, - match_error( + match_write_failure( ResponseUnsuccessful, "SnapshotWrite is not valid: " "Cannot apply operation M_MAX, number types are incompatible: " @@ -1157,8 +1194,11 @@ async def test_Kv_write__sum( ), ], ) + + +@_params_test_Kv_write__max @pytest_mark_asyncio -async def test_Kv_write__max( +async def test_Kv_write__atomic_max( kv: Kv, initial_val: int | float | KvU64 | None, 
max_val: KvU64, @@ -1169,7 +1209,20 @@ async def test_Kv_write__max( assert is_ok(await kv.atomic().max(("foo", 0), max_val, **max_kwargs).write()) -@pytest.mark.parametrize( +@_params_test_Kv_write__max +@pytest_mark_asyncio +async def test_Kv_write__max( + kv: Kv, + initial_val: int | float | KvU64 | None, + max_val: KvU64, + max_kwargs: dict[str, Any], + result: int | float | KvU64 | Callable[[Exception], bool], +) -> None: + async with validate_write_outcome(kv, initial_val, result): + assert isinstance(await kv.max(("foo", 0), max_val, **max_kwargs), VersionStamp) + + +_params_test_Kv_write__min = pytest.mark.parametrize( "initial_val, min_val, min_kwargs, result", [ (JSBigInt(12), JSBigInt(3), {}, JSBigInt(3)), @@ -1185,7 +1238,7 @@ async def test_Kv_write__max( # The errors reference M_SUM because bigint/number implement min/max # using clamped M_SUM operations, not the actual M_MIN/M_MAX, # because they only support u64. - match_error( + match_write_failure( ResponseUnsuccessful, "SnapshotWrite is not valid: " "Cannot apply operation M_SUM, number types are incompatible: " @@ -1197,7 +1250,7 @@ async def test_Kv_write__max( 1.5, JSBigInt(2), {}, - match_error( + match_write_failure( ResponseUnsuccessful, "SnapshotWrite is not valid: " "Cannot apply operation M_SUM, number types are incompatible: " @@ -1209,7 +1262,7 @@ async def test_Kv_write__max( KvU64(1), 2.0, {}, - match_error( + match_write_failure( ResponseUnsuccessful, "SnapshotWrite is not valid: " "Cannot apply operation M_SUM, number types are incompatible: " @@ -1221,7 +1274,7 @@ async def test_Kv_write__max( 2.0, KvU64(1), {}, - match_error( + match_write_failure( ResponseUnsuccessful, "SnapshotWrite is not valid: " "Cannot apply operation M_MIN, number types are incompatible: " @@ -1231,8 +1284,11 @@ async def test_Kv_write__max( ), ], ) + + +@_params_test_Kv_write__min @pytest_mark_asyncio -async def test_Kv_write__min( +async def test_Kv_write__atomic_min( kv: Kv, initial_val: int | float | 
KvU64 | None, min_val: KvU64, @@ -1243,17 +1299,39 @@ async def test_Kv_write__min( assert is_ok(await kv.atomic().min(("foo", 0), min_val, **min_kwargs).write()) +@_params_test_Kv_write__min +@pytest_mark_asyncio +async def test_Kv_write__min( + kv: Kv, + initial_val: int | float | KvU64 | None, + min_val: KvU64, + min_kwargs: dict[str, Any], + result: int | float | KvU64 | Callable[[Exception], bool], +) -> None: + async with validate_write_outcome(kv, initial_val, result): + assert isinstance(await kv.min(("foo", 0), min_val, **min_kwargs), VersionStamp) + + @pytest.mark.parametrize("initial_val", [None, 42]) @pytest_mark_asyncio -async def test_Kv_write__delete( +async def test_Kv_write__atomic_delete( kv: Kv, initial_val: int | float | KvU64 | None ) -> None: async with validate_write_outcome(kv, initial_val, result=None): assert is_ok(await kv.atomic().delete(("foo", 0)).write()) +@pytest.mark.parametrize("initial_val", [None, 42]) @pytest_mark_asyncio -async def test_Kv_write__check__allows_write_when_matching(kv: Kv) -> None: +async def test_Kv_write__delete( + kv: Kv, initial_val: int | float | KvU64 | None +) -> None: + async with validate_write_outcome(kv, initial_val, result=None): + assert isinstance(await kv.delete(("foo", 0)), VersionStamp) + + +@pytest_mark_asyncio +async def test_Kv_write__atomic_check__allows_write_when_matching(kv: Kv) -> None: async with validate_write_outcome(kv, None, result=42) as (kv, key): assert is_ok(await kv.atomic().check(key, None).set(key, 42).write()) @@ -1266,7 +1344,7 @@ async def test_Kv_write__check__allows_write_when_matching(kv: Kv) -> None: @pytest_mark_asyncio -async def test_Kv_write__check__fails_write_when_mismatching(kv: Kv) -> None: +async def test_Kv_write__atomic_check__fails_write_when_mismatching(kv: Kv) -> None: async with validate_write_outcome(kv, None, result=None) as (kv, key): result = await kv.atomic().check(key, VersionStamp(1)).set(key, 42).write() assert is_err(result) @@ -1284,6 +1362,40 
@@ async def test_Kv_write__check__fails_write_when_mismatching(kv: Kv) -> None: assert result.conflicts[key].versionstamp == initial.versionstamp +@pytest_mark_asyncio +async def test_Kv_write__check__returns_False_when_mismatching(kv: Kv) -> None: + async with validate_write_outcome(kv, None, result=None) as (kv, key): + assert (await kv.check(key, VersionStamp(1))) is False + assert (await kv.check(KvEntry(key, None, VersionStamp(1)))) is False + assert (await kv.check(Check(key, VersionStamp(1)))) is False + + async with validate_write_outcome(kv, 41, result=41) as (kv, key): + _, initial = await kv.get(key) + assert initial + wrong_ver = VersionStamp(int(initial.versionstamp) + 1) + assert (await kv.check(key)) is False + assert (await kv.check(key, None)) is False + assert (await kv.check(key, wrong_ver)) is False + assert (await kv.check(KvEntry(key, None, wrong_ver))) is False + assert (await kv.check(Check(key, None))) is False + assert (await kv.check(Check(key, wrong_ver))) is False + + +@pytest_mark_asyncio +async def test_Kv_write__check__returns_True_when_matching(kv: Kv) -> None: + async with validate_write_outcome(kv, None, result=None) as (kv, key): + assert (await kv.check(key)) is True + assert (await kv.check(key, None)) is True + assert (await kv.check(Check(key, None))) is True + + async with validate_write_outcome(kv, 41, result=41) as (kv, key): + _, initial = await kv.get(key) + assert initial + assert (await kv.check(key, initial.versionstamp)) is True + assert (await kv.check(KvEntry(key, None, initial.versionstamp))) is True + assert (await kv.check(Check(key, initial.versionstamp))) is True + + @pytest_mark_asyncio async def test_Kv_write__enqueue(kv: Kv, mock_db: MockKvDb) -> None: assert len(mock_db.queued_messages) == 0 From e8431f672c4a911e7541b0692234e495d1e16038 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Sun, 2 Feb 2025 06:53:36 +0000 Subject: [PATCH 47/52] refactor: adjust type annotations to support py39 --- 
src/denokv/_kv_writes.py | 61 +++++++++++++++++++++++++--------------- src/denokv/kv.py | 18 ++++++------ test/denokv_testing.py | 3 +- test/test_kv.py | 3 +- 4 files changed, 53 insertions(+), 32 deletions(-) diff --git a/src/denokv/_kv_writes.py b/src/denokv/_kv_writes.py index 67f3ad3..1f3d02d 100644 --- a/src/denokv/_kv_writes.py +++ b/src/denokv/_kv_writes.py @@ -69,14 +69,15 @@ from denokv.result import is_err KvNumberNameT = TypeVar("KvNumberNameT", bound=str, default=str) -NumberT = TypeVar("NumberT", bound=int | float, default=int | float) +NumberT = TypeVar("NumberT", bound=Union[int, float], default=Union[int, float]) KvNumberTypeT = TypeVar("KvNumberTypeT", default=object) KvNumberNameT_co = TypeVar("KvNumberNameT_co", bound=str, covariant=True, default=str) NumberT_co = TypeVar( - "NumberT_co", bound=int | float, covariant=True, default=int | float + "NumberT_co", bound=Union[int, float], covariant=True, default=Union[int, float] ) KvNumberTypeT_co = TypeVar("KvNumberTypeT_co", covariant=True, default=object) + U = TypeVar("U") MutateResultT = TypeVar("MutateResultT") EnqueueResultT = TypeVar("EnqueueResultT") @@ -721,15 +722,16 @@ def __lt__(self, other: object) -> bool: KvNumber._value2member_map_[int] = KvNumber.float KvNumber._value2member_map_[KvU64] = KvNumber.u64 -BigIntKvNumberIdentifier = Literal["bigint", KvNumber.bigint] | type[JSBigInt] -FloatKvNumberIdentifier = Literal["float", KvNumber.float] | type[float] -U64KvNumberIdentifier = Literal["u64", KvNumber.u64] | type[KvU64] -KvNumberIdentifier = ( - BigIntKvNumberIdentifier - | FloatKvNumberIdentifier - | U64KvNumberIdentifier - | KvNumber -) +BigIntKvNumberIdentifier: TypeAlias = Union[ + Literal["bigint", KvNumber.bigint], type[JSBigInt] +] +FloatKvNumberIdentifier: TypeAlias = Union[ + Literal["float", KvNumber.float], type[float] +] +U64KvNumberIdentifier: TypeAlias = Union[Literal["u64", KvNumber.u64], type[KvU64]] +KvNumberIdentifier: TypeAlias = Union[ + BigIntKvNumberIdentifier, 
FloatKvNumberIdentifier, U64KvNumberIdentifier, KvNumber +] def encode_v8_number(number: float, /) -> bytes: @@ -957,7 +959,7 @@ def sum( | None = None, **options: Unpack[SumOptions[NumberT]], ) -> MutateResultT: - delta = cast(NumberT | KvNumberTypeT, delta) + delta = cast(Union[NumberT, KvNumberTypeT], delta) number_type = cast( KvNumberInfo[KvNumberNameT, NumberT, KvNumberTypeT], number_type ) @@ -1055,7 +1057,7 @@ def min( | None = None, **options: Unpack[MutationOptions], ) -> MutateResultT: - value = cast(NumberT | KvNumberTypeT, value) + value = cast(Union[NumberT, KvNumberTypeT], value) number_type = cast( KvNumberInfo[KvNumberNameT, NumberT, KvNumberTypeT], number_type ) @@ -1153,7 +1155,7 @@ def max( | None = None, **options: Unpack[MutationOptions], ) -> MutateResultT: - value = cast(NumberT | KvNumberTypeT, value) + value = cast(Union[NumberT, KvNumberTypeT], value) number_type = cast( KvNumberInfo[KvNumberNameT, NumberT, KvNumberTypeT], number_type ) @@ -1229,7 +1231,7 @@ def enqueue( return self._enqueue(enqueue) -@dataclass +@dataclass(init=False) class PlannedWrite( CheckMixin["PlannedWrite"], SetMutatorMixin["PlannedWrite"], @@ -1240,11 +1242,26 @@ class PlannedWrite( EnqueueMixin["PlannedWrite"], AtomicWriteRepresentationWriter["CompletedWrite"], ): - kv: KvWriter | None = field(default=None) - checks: MutableSequence[CheckRepresentation] = field(default_factory=list) - mutations: MutableSequence[MutationRepresentation] = field(default_factory=list) - enqueues: MutableSequence[EnqueueRepresentation] = field(default_factory=list) - v8_encoder: Encoder | None = field(default=None, kw_only=True) + kv: KvWriter | None + checks: MutableSequence[CheckRepresentation] + mutations: MutableSequence[MutationRepresentation] + enqueues: MutableSequence[EnqueueRepresentation] + v8_encoder: Encoder | None + + def __init__( + self, + kv: KvWriter | None = None, + checks: MutableSequence[CheckRepresentation] | None = None, + mutations: 
MutableSequence[MutationRepresentation] | None = None, + enqueues: MutableSequence[EnqueueRepresentation] | None = None, + *, + v8_encoder: Encoder | None = None, + ) -> None: + self.kv = kv + self.checks = list(checks or ()) + self.mutations = list(mutations or ()) + self.enqueues = list(enqueues or ()) + self.v8_encoder = v8_encoder @override async def write( @@ -1745,10 +1762,10 @@ def _resolve_number_value_type( number_identifier: KvNumberIdentifier = number_type resolved_number_type = KvNumber.resolve(number_identifier).value # pyright: ignore[reportAssignmentType] else: - known_number = cast(KvU64 | JSBigInt | float, value) + known_number = cast(Union[KvU64, JSBigInt, float], value) resolved_number_type = KvNumber.resolve(number=known_number).value # pyright: ignore[reportAssignmentType] - resolved_value = cast(KvNumberTypeT | NumberT, value) + resolved_value = cast(Union[KvNumberTypeT, NumberT], value) return ( resolved_number_type.as_py_number(resolved_value), diff --git a/src/denokv/kv.py b/src/denokv/kv.py index 156fdb5..8b463a2 100644 --- a/src/denokv/kv.py +++ b/src/denokv/kv.py @@ -11,7 +11,6 @@ from enum import auto from functools import partial from os import environ -from types import EllipsisType from types import TracebackType from typing import Literal from typing import overload @@ -51,6 +50,8 @@ from denokv._kv_writes import SumMutatorMixin from denokv._kv_writes import WriteOperation from denokv._pycompat.dataclasses import slots_if310 +from denokv._pycompat.types import NotSet +from denokv._pycompat.types import NotSetType from denokv._pycompat.typing import Any from denokv._pycompat.typing import AsyncIterator from denokv._pycompat.typing import Awaitable @@ -66,6 +67,7 @@ from denokv._pycompat.typing import TypedDict from denokv._pycompat.typing import TypeVar from denokv._pycompat.typing import TypeVarTuple +from denokv._pycompat.typing import Union from denokv._pycompat.typing import Unpack from denokv._pycompat.typing import override 
from denokv.asyncio import loop_time @@ -834,12 +836,12 @@ async def write( self, arg: AtomicWriteRepresentationWriter[WriteResultT] | WriteOperation - | EllipsisType = ..., # ... is a sentinel to detect 0 args + | NotSetType = NotSet, # NotSet is a sentinel to detect 0 args *args: WriteOperation, protobuf_atomic_write: dp_protobuf.AtomicWrite | None = None, ) -> CompletedWrite | WriteResultT | KvWriterWriteResult: if protobuf_atomic_write is not None: - if arg is not ... or len(args) > 0: + if arg is not NotSet or len(args) > 0: raise TypeError( "Kv.write() got an unexpected positional argument with " "keyword argument 'protobuf_atomic_write'" @@ -848,11 +850,11 @@ async def write( return await self._atomic_write(protobuf_atomic_write) planned_write: PlannedWrite | AtomicWriteRepresentationWriter[WriteResultT] - if arg is ...: - # arg is ... when 0 args were passed, which is OK (no operations). - # But ... when args are provided means it was passed explicitly. + if arg is NotSet: + # arg is NotSet when 0 args were passed, which is OK (no operations). + # But NotSet when args are provided means it was passed explicitly. if args: - raise TypeError("Kv.write() got an unexpected '...'") + raise TypeError("Kv.write() got an unexpected 'NotSet'") # Note that it's OK to submit a write with no operations. We get a # versionstamp back. Submitting a write with only checks could be # used to check if a key has been changed without reading the value. 
@@ -896,7 +898,7 @@ async def _enqueue(self, enqueue: Enqueue, /) -> VersionStamp: tuple[SnapshotReadOutput, EndpointInfo], DataPathError ] _KvAtomicWriteResult: TypeAlias = Result[ - tuple[VersionStamp, EndpointInfo], CheckFailure | DataPathError + tuple[VersionStamp, EndpointInfo], Union[CheckFailure, DataPathError] ] diff --git a/test/denokv_testing.py b/test/denokv_testing.py index 18d9df5..9f2701f 100644 --- a/test/denokv_testing.py +++ b/test/denokv_testing.py @@ -60,6 +60,7 @@ from denokv._pycompat.typing import Sequence from denokv._pycompat.typing import TypeIs from denokv._pycompat.typing import TypeVar +from denokv._pycompat.typing import Union from denokv._pycompat.typing import cast from denokv.auth import ConsistencyLevel from denokv.auth import DatabaseMetadata @@ -595,7 +596,7 @@ def decode_v8_number(data: bytes) -> tuple[KvNumber, int | float]: if type(value) is JSBigInt: return KvNumber.bigint, value if type(value) in (int, float): - return KvNumber.float, cast(int | float, value) + return KvNumber.float, cast(Union[int, float], value) raise ValueError("V8-serialized value is not a BigInt or Number") diff --git a/test/test_kv.py b/test/test_kv.py index 83b279c..8b9cc6b 100644 --- a/test/test_kv.py +++ b/test/test_kv.py @@ -56,6 +56,7 @@ from denokv._pycompat.typing import Generator from denokv._pycompat.typing import Mapping from denokv._pycompat.typing import TypeAlias +from denokv._pycompat.typing import Union from denokv._pycompat.typing import cast from denokv.asyncio import loop_time from denokv.auth import ConsistencyLevel @@ -979,7 +980,7 @@ async def test_Kv_write__set_versioned(kv: Kv) -> None: assert entry and entry.value == "Hi" -ErrorPredicate: TypeAlias = Callable[[BaseException | None], bool] +ErrorPredicate: TypeAlias = Callable[[Union[BaseException, None]], bool] def match_client_error(server_msg_content: str) -> ErrorPredicate: From 3fde257d356844e90d14c81868a7920d81d17d05 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Mon, 3 
Feb 2025 04:24:50 +0000 Subject: [PATCH 48/52] refactor: use parse_rfc3339_datetime in tests Python 3.9's datetime.fromisoformat() does not parse datetimes with a UTC Z indicator. --- test/test__kv_writes__CommittedWrite.py | 5 ++--- test/test__kv_writes__ConflictedWrite.py | 4 ++-- test/test__kv_writes__Delete.py | 5 ++--- test/test__kv_writes__Enqueue.py | 4 ++-- test/test__kv_writes__FailedWrite.py | 4 ++-- test/test__kv_writes__Max.py | 4 ++-- test/test__kv_writes__Min.py | 4 ++-- test/test__kv_writes__PlannedWrite.py | 4 ++-- test/test__kv_writes__Set.py | 5 ++--- test/test__kv_writes__Sum.py | 3 ++- 10 files changed, 20 insertions(+), 22 deletions(-) diff --git a/test/test__kv_writes__CommittedWrite.py b/test/test__kv_writes__CommittedWrite.py index a9b26ad..1978664 100644 --- a/test/test__kv_writes__CommittedWrite.py +++ b/test/test__kv_writes__CommittedWrite.py @@ -1,7 +1,5 @@ from __future__ import annotations -from datetime import datetime - from yarl import URL from denokv._kv_values import VersionStamp @@ -9,12 +7,13 @@ from denokv._kv_writes import CommittedWrite from denokv._kv_writes import Enqueue from denokv._kv_writes import Set +from denokv._rfc3339 import parse_rfc3339_datetime from denokv.auth import ConsistencyLevel from denokv.auth import EndpointInfo from denokv.kv_keys import KvKey from denokv.result import is_ok -T1 = datetime.fromisoformat("2000-01-02T03:04:05.6Z") +T1 = parse_rfc3339_datetime("2000-01-02T03:04:05.6Z").value_or_raise() EP = EndpointInfo(URL("https://example.com/"), consistency=ConsistencyLevel.STRONG) diff --git a/test/test__kv_writes__ConflictedWrite.py b/test/test__kv_writes__ConflictedWrite.py index c8ae35c..c0587d3 100644 --- a/test/test__kv_writes__ConflictedWrite.py +++ b/test/test__kv_writes__ConflictedWrite.py @@ -1,7 +1,6 @@ from __future__ import annotations import traceback -from datetime import datetime import pytest from yarl import URL @@ -14,13 +13,14 @@ from denokv._pycompat.typing import Iterable from 
denokv._pycompat.typing import Sequence from denokv._pycompat.typing import cast +from denokv._rfc3339 import parse_rfc3339_datetime from denokv.auth import ConsistencyLevel from denokv.auth import EndpointInfo from denokv.datapath import CheckFailure from denokv.kv_keys import KvKey from denokv.result import is_err -T1 = datetime.fromisoformat("2000-01-02T03:04:05.6Z") +T1 = parse_rfc3339_datetime("2000-01-02T03:04:05.6Z").value_or_raise() # noqa: F821 EP = EndpointInfo(URL("https://example.com/"), consistency=ConsistencyLevel.STRONG) diff --git a/test/test__kv_writes__Delete.py b/test/test__kv_writes__Delete.py index 88d330a..e784086 100644 --- a/test/test__kv_writes__Delete.py +++ b/test/test__kv_writes__Delete.py @@ -1,12 +1,11 @@ -from datetime import datetime - from v8serialize import Encoder from denokv import _datapath_pb2 as datapath_pb2 from denokv._kv_writes import Delete +from denokv._rfc3339 import parse_rfc3339_datetime from denokv.kv_keys import KvKey -T1 = datetime.fromisoformat("2000-01-02T03:04:05.6Z") +T1 = parse_rfc3339_datetime("2000-01-02T03:04:05.6Z").value_or_raise() def test_constructors() -> None: diff --git a/test/test__kv_writes__Enqueue.py b/test/test__kv_writes__Enqueue.py index f63acf5..2bb286d 100644 --- a/test/test__kv_writes__Enqueue.py +++ b/test/test__kv_writes__Enqueue.py @@ -1,4 +1,3 @@ -from datetime import datetime from itertools import count from itertools import islice @@ -8,9 +7,10 @@ from denokv._kv_writes import DEFAULT_ENQUEUE_RETRY_DELAY_COUNT from denokv._kv_writes import DEFAULT_ENQUEUE_RETRY_DELAYS from denokv._kv_writes import Enqueue +from denokv._rfc3339 import parse_rfc3339_datetime from denokv.kv_keys import KvKey -T1 = datetime.fromisoformat("2000-01-02T03:04:05.6Z") +T1 = parse_rfc3339_datetime("2000-01-02T03:04:05.6Z").value_or_raise() def test_constructors() -> None: diff --git a/test/test__kv_writes__FailedWrite.py b/test/test__kv_writes__FailedWrite.py index 59f3d94..cc9aa31 100644 --- 
a/test/test__kv_writes__FailedWrite.py +++ b/test/test__kv_writes__FailedWrite.py @@ -1,7 +1,6 @@ from __future__ import annotations import traceback -from datetime import datetime import pytest from yarl import URL @@ -10,13 +9,14 @@ from denokv._kv_writes import Enqueue from denokv._kv_writes import FailedWrite from denokv._kv_writes import Set +from denokv._rfc3339 import parse_rfc3339_datetime from denokv.auth import ConsistencyLevel from denokv.auth import EndpointInfo from denokv.datapath import ProtocolViolation from denokv.kv_keys import KvKey from denokv.result import is_err -T1 = datetime.fromisoformat("2000-01-02T03:04:05.6Z") +T1 = parse_rfc3339_datetime("2000-01-02T03:04:05.6Z").value_or_raise() EP = EndpointInfo(URL("https://example.com/"), consistency=ConsistencyLevel.STRONG) diff --git a/test/test__kv_writes__Max.py b/test/test__kv_writes__Max.py index 15b52be..40b46a7 100644 --- a/test/test__kv_writes__Max.py +++ b/test/test__kv_writes__Max.py @@ -1,5 +1,4 @@ import builtins -from datetime import datetime from typing import Literal import pytest @@ -18,10 +17,11 @@ from denokv._pycompat.typing import NewType from denokv._pycompat.typing import assert_type from denokv._pycompat.typing import cast +from denokv._rfc3339 import parse_rfc3339_datetime from denokv.kv_keys import KvKey from test.denokv_testing import typeval -T1 = datetime.fromisoformat("2000-01-02T03:04:05.6Z") +T1 = parse_rfc3339_datetime("2000-01-02T03:04:05.6Z").value_or_raise() k = KvKey("a") diff --git a/test/test__kv_writes__Min.py b/test/test__kv_writes__Min.py index fd7b348..692a420 100644 --- a/test/test__kv_writes__Min.py +++ b/test/test__kv_writes__Min.py @@ -1,5 +1,4 @@ import builtins -from datetime import datetime from typing import Literal import pytest @@ -18,10 +17,11 @@ from denokv._pycompat.typing import NewType from denokv._pycompat.typing import assert_type from denokv._pycompat.typing import cast +from denokv._rfc3339 import parse_rfc3339_datetime from 
denokv.kv_keys import KvKey from test.denokv_testing import typeval -T1 = datetime.fromisoformat("2000-01-02T03:04:05.6Z") +T1 = parse_rfc3339_datetime("2000-01-02T03:04:05.6Z").value_or_raise() k = KvKey("a") diff --git a/test/test__kv_writes__PlannedWrite.py b/test/test__kv_writes__PlannedWrite.py index 5e164ba..6a0e116 100644 --- a/test/test__kv_writes__PlannedWrite.py +++ b/test/test__kv_writes__PlannedWrite.py @@ -1,7 +1,6 @@ from __future__ import annotations import re -from datetime import datetime from unittest.mock import create_autospec import pytest @@ -28,6 +27,7 @@ from denokv._kv_writes import PlannedWrite from denokv._kv_writes import Sum from denokv._pycompat.typing import TypedDict +from denokv._rfc3339 import parse_rfc3339_datetime from denokv.auth import ConsistencyLevel from denokv.auth import EndpointInfo from denokv.datapath import AutoRetry @@ -64,7 +64,7 @@ async def test_as_protobuf( datapath_pb2.AtomicWrite(), ) - T1 = datetime.fromisoformat("2000-01-01T00:00:00Z") + T1 = parse_rfc3339_datetime("2000-01-01T00:00:00Z").value_or_raise() planned_write_start = PlannedWrite() planned_write = ( diff --git a/test/test__kv_writes__Set.py b/test/test__kv_writes__Set.py index 068747c..eb67f4f 100644 --- a/test/test__kv_writes__Set.py +++ b/test/test__kv_writes__Set.py @@ -1,13 +1,12 @@ -from datetime import datetime - from v8serialize import Encoder from denokv import _datapath_pb2 as datapath_pb2 from denokv._kv_values import KvU64 from denokv._kv_writes import Set +from denokv._rfc3339 import parse_rfc3339_datetime from denokv.kv_keys import KvKey -T1 = datetime.fromisoformat("2000-01-02T03:04:05.6Z") +T1 = parse_rfc3339_datetime("2000-01-02T03:04:05.6Z").value_or_raise() def test_constructors() -> None: diff --git a/test/test__kv_writes__Sum.py b/test/test__kv_writes__Sum.py index b0da85d..710857d 100644 --- a/test/test__kv_writes__Sum.py +++ b/test/test__kv_writes__Sum.py @@ -33,6 +33,7 @@ from denokv._pycompat.typing import NewType from 
denokv._pycompat.typing import assert_type from denokv._pycompat.typing import cast +from denokv._rfc3339 import parse_rfc3339_datetime from denokv.datapath import read_range_single from denokv.kv_keys import KvKey from denokv.result import Err @@ -45,7 +46,7 @@ from test.denokv_testing import typeval from test.denokv_testing import unsafe_parse_protobuf_kv_entry -T1 = datetime.fromisoformat("2000-01-02T03:04:05.6Z") +T1 = parse_rfc3339_datetime("2000-01-02T03:04:05.6Z").value_or_raise() u64 = st.integers(min_value=0, max_value=KvU64.RANGE.stop - 1) neg_u64 = st.integers(min_value=-(KvU64.RANGE.stop - 1), max_value=0) From 45f1517215e6860c5e2e334e208ef119e2fdbcad Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Tue, 4 Feb 2025 03:50:52 +0000 Subject: [PATCH 49/52] fix: prevent __dict__ creation on types with slots Some mixin/interface types did not have empty __slots__ defined, which caused types using slots to still have dicts. --- src/denokv/_kv_values.py | 2 + src/denokv/_pycompat/dataclasses.py | 2 + src/denokv/datapath.py | 2 + src/denokv/kv.py | 8 ++-- src/denokv/kv_keys.py | 4 ++ src/denokv/result.py | 4 ++ test/denokv_testing.py | 13 ++++++ test/test__kv_values.py | 20 +++++++++ test/test__kv_writes__Check.py | 10 +++++ test/test__kv_writes__CommittedWrite.py | 12 +++++ test/test__kv_writes__Delete.py | 10 +++++ test/test__kv_writes__Enqueue.py | 10 +++++ test/test__kv_writes__Limit.py | 10 +++++ test/test__kv_writes__Max.py | 9 ++++ test/test__kv_writes__Min.py | 9 ++++ test/test__kv_writes__Set.py | 10 +++++ test/test__kv_writes__Sum.py | 9 ++++ test/test_auth.py | 36 +++++++++++++++ test/test_kv.py | 59 +++++++++++++++++++++++++ test/test_kv_keys__kvkey.py | 11 +++-- test/test_kv_keys__kvkeyrange.py | 18 ++++++++ test/test_result.py | 34 +++++++------- 22 files changed, 276 insertions(+), 26 deletions(-) diff --git a/src/denokv/_kv_values.py b/src/denokv/_kv_values.py index 138a5da..e0ce994 100644 --- a/src/denokv/_kv_values.py +++ 
b/src/denokv/_kv_values.py @@ -51,6 +51,8 @@ class VersionStamp(bytes): '00000000000000ff0000' """ + __slots__ = () + RANGE: ClassVar = range(0, 2**80) def __new__(cls, value: str | bytes | int) -> Self: diff --git a/src/denokv/_pycompat/dataclasses.py b/src/denokv/_pycompat/dataclasses.py index 3767436..387cf8b 100644 --- a/src/denokv/_pycompat/dataclasses.py +++ b/src/denokv/_pycompat/dataclasses.py @@ -51,6 +51,8 @@ class FrozenAfterInitDataclass: doesn't affect non-dataclass fields, such as typing.Generic's dunder fields. """ + __slots__ = () + def __delattr__(self, name: str) -> None: if name in (f.name for f in dataclass_fields(self)): raise FrozenInstanceError(f"cannot delete field {name}") diff --git a/src/denokv/datapath.py b/src/denokv/datapath.py index 677385f..d444cc6 100644 --- a/src/denokv/datapath.py +++ b/src/denokv/datapath.py @@ -84,6 +84,8 @@ def kv_key_bytes(self) -> bytes: ... @runtime_checkable class KvKeyRangeEncodable(Container[AnyKvKey], Protocol): + __slots__ = () + def kv_key_range_bytes(self) -> tuple[bytes, bytes]: ... diff --git a/src/denokv/kv.py b/src/denokv/kv.py index 8b463a2..31f4ec7 100644 --- a/src/denokv/kv.py +++ b/src/denokv/kv.py @@ -213,7 +213,7 @@ class KvCredentials: access_token: str -@dataclass +@dataclass(frozen=True, **slots_if310()) class Authenticator: """ Authenticates with a KV database server and returns its metadata. @@ -902,7 +902,7 @@ async def _enqueue(self, enqueue: Enqueue, /) -> VersionStamp: ] -@dataclass(frozen=True) +@dataclass(frozen=True, **slots_if310()) class ListContext: prefix: AnyKvKey | None start: AnyKvKey | None @@ -925,12 +925,14 @@ def __post_init__(self) -> None: class AnyCursorFormat(Protocol): + __slots__ = () + def get_key_for_cursor(self, cursor: str) -> Result[KvKeyTuple, InvalidCursor]: ... def get_cursor_for_key(self, key: AnyKvKey) -> Result[str, ValueError]: ... 
-@dataclass(frozen=True) +@dataclass(frozen=True, **slots_if310()) class Base64KeySuffixCursorFormat(AnyCursorFormat): r""" A cursor format that encodes keys as URL-safe base64. diff --git a/src/denokv/kv_keys.py b/src/denokv/kv_keys.py index 0221c5d..c39c996 100644 --- a/src/denokv/kv_keys.py +++ b/src/denokv/kv_keys.py @@ -333,6 +333,8 @@ def __repr__(self) -> str: class Include(_KeyBoundary[AnyKvKeyT_co]): """KvKeyRange boundary that includes its key in the range.""" + __slots__ = () + if TYPE_CHECKING: # For some reason mypy only infers types of Pieces using new not init @overload @@ -351,6 +353,7 @@ def range(self) -> KvKeyRange[Self, Self]: class IncludePrefix(_KeyBoundary[AnyKvKeyT_co]): """KvKeyRange boundary that includes keys prefixed by its key in the range.""" + __slots__ = () if TYPE_CHECKING: # For some reason mypy only infers types of Pieces using new not init @overload @@ -388,6 +391,7 @@ def range(self) -> KvKeyRange[Include[AnyKvKeyT_co], Self]: class Exclude(_KeyBoundary[AnyKvKeyT_co]): """KvKeyRange boundary that excludes its key from the range.""" + __slots__ = () if TYPE_CHECKING: # For some reason mypy only infers types of Pieces using new not init @overload diff --git a/src/denokv/result.py b/src/denokv/result.py index 02e2eef..a387d27 100644 --- a/src/denokv/result.py +++ b/src/denokv/result.py @@ -29,11 +29,15 @@ @runtime_checkable class AnySuccess(Protocol, metaclass=ABCMeta): + __slots__ = () + def _AnySuccess_marker(self, no_call: Never) -> Never: ... @runtime_checkable class AnyFailure(Protocol, metaclass=ABCMeta): + __slots__ = () + def _AnyFailure_marker(self, no_call: Never) -> Never: ... 
diff --git a/test/denokv_testing.py b/test/denokv_testing.py index 9f2701f..4925e97 100644 --- a/test/denokv_testing.py +++ b/test/denokv_testing.py @@ -13,10 +13,12 @@ from datetime import timedelta from itertools import groupby from typing import Literal +from typing import Never from typing import overload from unittest.mock import Mock from uuid import UUID +import pytest import v8serialize import v8serialize.encode from aiohttp import web @@ -879,3 +881,14 @@ def nextafter(x: float, y: float, *, steps: int = 1) -> float: def typeval(value: T) -> tuple[type[T], T]: return type(value), value + + +def create_dataclass_slots_test() -> Callable[[Never], None]: + @pytest.mark.skipif( + sys.version_info < (3, 10), reason="<3.10 does not use slots for dataclass" + ) + def test_instances_dont_have_dict_because_of_slots(instance: object) -> None: + with pytest.raises(AttributeError): + _ = instance.__dict__ + + return test_instances_dont_have_dict_because_of_slots diff --git a/test/test__kv_values.py b/test/test__kv_values.py index 9b7305b..3e67731 100644 --- a/test/test__kv_values.py +++ b/test/test__kv_values.py @@ -1,10 +1,30 @@ from __future__ import annotations +import pytest from hypothesis import given from hypothesis import strategies as st +from denokv._kv_values import KvEntry from denokv._kv_values import KvU64 from denokv._kv_values import VersionStamp +from denokv._pycompat.typing import Callable +from denokv.kv_keys import KvKey +from test.denokv_testing import create_dataclass_slots_test + + +@pytest.fixture( + params=[ + pytest.param(lambda: KvEntry(KvKey("a"), 42, VersionStamp(1)), id="KvEntry"), + pytest.param(lambda: VersionStamp(1), id="VersionStamp"), + pytest.param(lambda: KvU64(1), id="KvU64"), + ] +) +def instance(request: pytest.FixtureRequest) -> object: + param: Callable[[], object] = request.param + return param() + + +test_instances_dont_have_dict_because_of_slots = create_dataclass_slots_test() @given(v=st.integers(min_value=0, 
max_value=2**80 - 1)) diff --git a/test/test__kv_writes__Check.py b/test/test__kv_writes__Check.py index 4d4054f..b70bf36 100644 --- a/test/test__kv_writes__Check.py +++ b/test/test__kv_writes__Check.py @@ -1,9 +1,19 @@ +import pytest from v8serialize import Encoder from denokv import _datapath_pb2 as datapath_pb2 from denokv._kv_values import VersionStamp from denokv._kv_writes import Check from denokv.kv_keys import KvKey +from test.denokv_testing import create_dataclass_slots_test + + +@pytest.fixture +def instance() -> Check: + return Check(KvKey("a"), None) + + +test_instances_dont_have_dict_because_of_slots = create_dataclass_slots_test() def test_constructors() -> None: diff --git a/test/test__kv_writes__CommittedWrite.py b/test/test__kv_writes__CommittedWrite.py index 1978664..b30869d 100644 --- a/test/test__kv_writes__CommittedWrite.py +++ b/test/test__kv_writes__CommittedWrite.py @@ -1,5 +1,6 @@ from __future__ import annotations +import pytest from yarl import URL from denokv._kv_values import VersionStamp @@ -12,11 +13,22 @@ from denokv.auth import EndpointInfo from denokv.kv_keys import KvKey from denokv.result import is_ok +from test.denokv_testing import create_dataclass_slots_test T1 = parse_rfc3339_datetime("2000-01-02T03:04:05.6Z").value_or_raise() EP = EndpointInfo(URL("https://example.com/"), consistency=ConsistencyLevel.STRONG) +@pytest.fixture +def instance() -> CommittedWrite: + return CommittedWrite( + VersionStamp(1), checks=[], mutations=[], enqueues=[], endpoint=EP + ) + + +test_instances_dont_have_dict_because_of_slots = create_dataclass_slots_test() + + def test_is_AnySuccess() -> None: assert is_ok( CommittedWrite( diff --git a/test/test__kv_writes__Delete.py b/test/test__kv_writes__Delete.py index e784086..d023cb9 100644 --- a/test/test__kv_writes__Delete.py +++ b/test/test__kv_writes__Delete.py @@ -1,13 +1,23 @@ +import pytest from v8serialize import Encoder from denokv import _datapath_pb2 as datapath_pb2 from denokv._kv_writes 
import Delete from denokv._rfc3339 import parse_rfc3339_datetime from denokv.kv_keys import KvKey +from test.denokv_testing import create_dataclass_slots_test T1 = parse_rfc3339_datetime("2000-01-02T03:04:05.6Z").value_or_raise() +@pytest.fixture +def instance() -> Delete: + return Delete(KvKey("a")) + + +test_instances_dont_have_dict_because_of_slots = create_dataclass_slots_test() + + def test_constructors() -> None: instance = Delete(KvKey("a")) assert instance.key == KvKey("a") diff --git a/test/test__kv_writes__Enqueue.py b/test/test__kv_writes__Enqueue.py index 2bb286d..2f2b1cb 100644 --- a/test/test__kv_writes__Enqueue.py +++ b/test/test__kv_writes__Enqueue.py @@ -1,6 +1,7 @@ from itertools import count from itertools import islice +import pytest from v8serialize import Encoder from denokv import _datapath_pb2 as datapath_pb2 @@ -9,10 +10,19 @@ from denokv._kv_writes import Enqueue from denokv._rfc3339 import parse_rfc3339_datetime from denokv.kv_keys import KvKey +from test.denokv_testing import create_dataclass_slots_test T1 = parse_rfc3339_datetime("2000-01-02T03:04:05.6Z").value_or_raise() +@pytest.fixture +def instance() -> Enqueue: + return Enqueue(42) + + +test_instances_dont_have_dict_because_of_slots = create_dataclass_slots_test() + + def test_constructors() -> None: message = {"msg": "Hi"} instance = Enqueue(message) diff --git a/test/test__kv_writes__Limit.py b/test/test__kv_writes__Limit.py index 194da19..a0e2d8b 100644 --- a/test/test__kv_writes__Limit.py +++ b/test/test__kv_writes__Limit.py @@ -1,5 +1,6 @@ from __future__ import annotations +import pytest from v8serialize.constants import FLOAT64_SAFE_INT_RANGE from v8serialize.jstypes import JSBigInt @@ -8,6 +9,15 @@ from denokv._kv_writes import LIMIT_UNLIMITED from denokv._kv_writes import Limit from denokv._kv_writes import LimitExceededPolicy +from test.denokv_testing import create_dataclass_slots_test + + +@pytest.fixture +def instance() -> Limit: + return Limit(1, 5, "clamp") + + 
+test_instances_dont_have_dict_because_of_slots = create_dataclass_slots_test() def test_constructor() -> None: diff --git a/test/test__kv_writes__Max.py b/test/test__kv_writes__Max.py index 40b46a7..1767820 100644 --- a/test/test__kv_writes__Max.py +++ b/test/test__kv_writes__Max.py @@ -19,12 +19,21 @@ from denokv._pycompat.typing import cast from denokv._rfc3339 import parse_rfc3339_datetime from denokv.kv_keys import KvKey +from test.denokv_testing import create_dataclass_slots_test from test.denokv_testing import typeval T1 = parse_rfc3339_datetime("2000-01-02T03:04:05.6Z").value_or_raise() k = KvKey("a") +@pytest.fixture +def instance() -> Max: + return Max(k, 9, KvNumber.float) + + +test_instances_dont_have_dict_because_of_slots = create_dataclass_slots_test() + + def test_init__float() -> None: float_max = Max(k, 9, KvNumber.float.value) assert float_max.key is k diff --git a/test/test__kv_writes__Min.py b/test/test__kv_writes__Min.py index 692a420..d039e24 100644 --- a/test/test__kv_writes__Min.py +++ b/test/test__kv_writes__Min.py @@ -19,12 +19,21 @@ from denokv._pycompat.typing import cast from denokv._rfc3339 import parse_rfc3339_datetime from denokv.kv_keys import KvKey +from test.denokv_testing import create_dataclass_slots_test from test.denokv_testing import typeval T1 = parse_rfc3339_datetime("2000-01-02T03:04:05.6Z").value_or_raise() k = KvKey("a") +@pytest.fixture +def instance() -> Min: + return Min(k, 9, KvNumber.float) + + +test_instances_dont_have_dict_because_of_slots = create_dataclass_slots_test() + + def test_init__float() -> None: float_min = Min(k, 9, KvNumber.float.value) assert float_min.key is k diff --git a/test/test__kv_writes__Set.py b/test/test__kv_writes__Set.py index eb67f4f..f264b4b 100644 --- a/test/test__kv_writes__Set.py +++ b/test/test__kv_writes__Set.py @@ -1,3 +1,4 @@ +import pytest from v8serialize import Encoder from denokv import _datapath_pb2 as datapath_pb2 @@ -5,10 +6,19 @@ from denokv._kv_writes import Set from 
denokv._rfc3339 import parse_rfc3339_datetime from denokv.kv_keys import KvKey +from test.denokv_testing import create_dataclass_slots_test T1 = parse_rfc3339_datetime("2000-01-02T03:04:05.6Z").value_or_raise() +@pytest.fixture +def instance() -> Set: + return Set(KvKey("a"), "foo") + + +test_instances_dont_have_dict_because_of_slots = create_dataclass_slots_test() + + def test_constructors() -> None: value = {"foo": "bar"} instance = Set(KvKey("a"), value) diff --git a/test/test__kv_writes__Sum.py b/test/test__kv_writes__Sum.py index 710857d..d434daf 100644 --- a/test/test__kv_writes__Sum.py +++ b/test/test__kv_writes__Sum.py @@ -43,6 +43,7 @@ from test.denokv_testing import MockKvDb from test.denokv_testing import SumLimitExceeded from test.denokv_testing import add_entries +from test.denokv_testing import create_dataclass_slots_test from test.denokv_testing import typeval from test.denokv_testing import unsafe_parse_protobuf_kv_entry @@ -52,6 +53,14 @@ neg_u64 = st.integers(min_value=-(KvU64.RANGE.stop - 1), max_value=0) +@pytest.fixture +def instance() -> Sum: + return Sum(KvKey("a"), 1) + + +test_instances_dont_have_dict_because_of_slots = create_dataclass_slots_test() + + def test_init__limits() -> None: sum1 = Sum(KvKey("a"), 1) assert sum1.limit == LIMIT_UNLIMITED diff --git a/test/test_auth.py b/test/test_auth.py index 00de3a9..e430165 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -2,6 +2,7 @@ import json from copy import deepcopy +from datetime import datetime from uuid import UUID import aiohttp @@ -30,12 +31,47 @@ from denokv.auth import read_metadata_exchange_response from test.denokv_testing import assume_err from test.denokv_testing import assume_ok +from test.denokv_testing import create_dataclass_slots_test TestClient: TypeAlias = _TestClient[web.Request, web.Application] pytest_mark_asyncio = pytest.mark.asyncio() +@pytest.fixture( + params=[ + pytest.param( + lambda: DatabaseMetadata( + version=2, + 
database_id=UUID("AD50A341-5351-4FC3-82D0-72CFEE369A09"), + token="thisisnotasecret", + expires_at=datetime.now(), + endpoints=( + EndpointInfo( + url=URL("https://db.example.com/v2"), + consistency=ConsistencyLevel.STRONG, + ), + ), + ), + id="DatabaseMetadata", + ), + pytest.param( + lambda: EndpointInfo( + url=URL("https://db.example.com/v2"), + consistency=ConsistencyLevel.STRONG, + ), + id="EndpointInfo", + ), + ] +) +def instance(request: pytest.FixtureRequest) -> object: + param: Callable[[], object] = request.param + return param() + + +test_instances_dont_have_dict_because_of_slots = create_dataclass_slots_test() + + @pytest.fixture def valid_metadata_exchange_response() -> dict[str, object]: return { diff --git a/test/test_kv.py b/test/test_kv.py index 8b9cc6b..d334b4a 100644 --- a/test/test_kv.py +++ b/test/test_kv.py @@ -77,12 +77,16 @@ from denokv.errors import InvalidCursor from denokv.kv import Authenticator from denokv.kv import AuthenticatorFn +from denokv.kv import Base64KeySuffixCursorFormat from denokv.kv import CachedValue from denokv.kv import DatabaseMetadataCache from denokv.kv import EndpointSelector from denokv.kv import Kv +from denokv.kv import KvCredentials from denokv.kv import KvFlags from denokv.kv import KvListOptions +from denokv.kv import ListContext +from denokv.kv import ListKvEntry from denokv.kv import OpenKvFinalize from denokv.kv import normalize_key from denokv.kv import open_kv @@ -97,6 +101,7 @@ from test.denokv_testing import MockKvDb from test.denokv_testing import add_entries from test.denokv_testing import assume_ok +from test.denokv_testing import create_dataclass_slots_test from test.denokv_testing import make_database_metadata from test.denokv_testing import mock_db_api from test.denokv_testing import unsafe_parse_protobuf_kv_entry @@ -1621,3 +1626,57 @@ async def all_inner_tasks_awaited() -> AsyncGenerator[None]: inner_tasks = asyncio.all_tasks() - pre_existing_tasks if inner_tasks: await asyncio.wait(inner_tasks) 
+ + +LIST_CONTEXT = ListContext( + None, + None, + None, + b"", + b"", + None, + None, + False, + ConsistencyLevel.STRONG, + 1, + lambda lc: Base64KeySuffixCursorFormat(lc.packed_start, lc.packed_end), +) + +DB_META = DatabaseMetadata( + version=2, + database_id=UUID("AD50A341-5351-4FC3-82D0-72CFEE369A09"), + token="thisisnotasecret", + expires_at=datetime.now(), + endpoints=( + EndpointInfo( + url=URL("https://db.example.com/v2"), + consistency=ConsistencyLevel.STRONG, + ), + ), +) + + +@pytest.fixture( + params=[ + pytest.param( + ListKvEntry(KvKey("a"), 42, VersionStamp(1), LIST_CONTEXT), id="ListKvEntry" + ), + pytest.param(EndpointSelector(DB_META), id="EndpointSelector"), + pytest.param(CachedValue(fresh_until=42, value=42), id="CachedValue"), + pytest.param(KvCredentials(URL("http://example"), ""), id="KvCredentials"), + pytest.param( + Authenticator(cast(Any, None), cast(Any, None), cast(Any, None)), + id="Authenticator", + ), + pytest.param(LIST_CONTEXT, id="ListContext"), + pytest.param( + Base64KeySuffixCursorFormat(b"", b""), id="Base64KeySuffixCursorFormat" + ), + ] +) +def instance(request: pytest.FixtureRequest) -> object: + param: object = request.param + return param + + +test_instances_dont_have_dict_because_of_slots = create_dataclass_slots_test() diff --git a/test/test_kv_keys__kvkey.py b/test/test_kv_keys__kvkey.py index b75dd9a..8195081 100644 --- a/test/test_kv_keys__kvkey.py +++ b/test/test_kv_keys__kvkey.py @@ -16,12 +16,15 @@ from denokv.datapath import KvKeyTuple from denokv.datapath import pack_key from denokv.kv_keys import KvKey +from test.denokv_testing import create_dataclass_slots_test -def test_instances_do_not_define_dict() -> None: - k = KvKey() - with pytest.raises(AttributeError): - print(k.__dict__) +@pytest.fixture +def instance() -> KvKey: + return KvKey("a") + + +test_instances_dont_have_dict_because_of_slots = create_dataclass_slots_test() def test_instances_are_KvKeyEncodable() -> None: diff --git 
a/test/test_kv_keys__kvkeyrange.py b/test/test_kv_keys__kvkeyrange.py index d8de0d6..7a64a47 100644 --- a/test/test_kv_keys__kvkeyrange.py +++ b/test/test_kv_keys__kvkeyrange.py @@ -11,6 +11,7 @@ from denokv.kv_keys import KvKeyRange from denokv.kv_keys import StartBoundary from denokv.kv_keys import StopBoundary +from test.denokv_testing import create_dataclass_slots_test def test_types() -> None: @@ -192,3 +193,20 @@ def test_contains__stop( key_range = KvKeyRange(IncludeAll(), stop) assert (key in key_range) == key_included + + +@pytest.fixture( + params=[ + pytest.param(Include("b", 10), id="Include"), + pytest.param(IncludePrefix("b", 10), id="IncludePrefix"), + pytest.param(Exclude("b", 10), id="Exclude"), + pytest.param(IncludeAll(), id="IncludeAll"), + pytest.param(KvKeyRange(), id="KvKeyRange"), + ] +) +def instance(request: pytest.FixtureRequest) -> object: + param: object = request.param + return param + + +test_instances_dont_have_dict_because_of_slots = create_dataclass_slots_test() diff --git a/test/test_result.py b/test/test_result.py index 4e9394c..1b1c4c1 100644 --- a/test/test_result.py +++ b/test/test_result.py @@ -1,6 +1,5 @@ from __future__ import annotations -import sys from typing import Literal from unittest.mock import Mock @@ -8,10 +7,12 @@ from denokv._pycompat.typing import TYPE_CHECKING from denokv._pycompat.typing import Any +from denokv._pycompat.typing import Callable from denokv._pycompat.typing import Iterable from denokv._pycompat.typing import Never from denokv._pycompat.typing import Sequence from denokv._pycompat.typing import TypeIs +from denokv._pycompat.typing import Union from denokv._pycompat.typing import cast from denokv.result import AnyFailure from denokv.result import AnySuccess @@ -27,17 +28,23 @@ from denokv.result import Some from denokv.result import is_err from denokv.result import is_ok +from test.denokv_testing import create_dataclass_slots_test -@pytest.mark.skipif( - sys.version_info < (3, 10), reason="<3.10 
does not use slots for dataclass" +@pytest.fixture( + params=[ + pytest.param(lambda: Some(1), id="Some"), + pytest.param(lambda: Nothing(), id="Nothing"), + pytest.param(lambda: Ok(1), id="Ok"), + pytest.param(lambda: Err("x"), id="Err"), + ] ) -def test_Option__instances_use_slots_to_avoid_dict() -> None: - with pytest.raises(AttributeError): - print(Some(1).__dict__) +def instance(request: pytest.FixtureRequest) -> Option[int] | Result[int, str]: + param: Callable[[], Union[Option[int], Result[int, str]]] = request.param + return param() + - with pytest.raises(AttributeError): - print(Nothing().__dict__) +test_instances_dont_have_dict_because_of_slots = create_dataclass_slots_test() def test_Option__satisfies_OptionMethods() -> None: @@ -201,17 +208,6 @@ def type_check_zip_with(a: Option[str], b: Option[object]) -> Option[int]: return a.zip_with(b, int) # type: ignore[arg-type] -@pytest.mark.skipif( - sys.version_info < (3, 10), reason="<3.10 does not use slots for dataclass" -) -def test_Result__instances_use_slots_to_avoid_dict() -> None: - with pytest.raises(AttributeError): - print(Ok(1).__dict__) - - with pytest.raises(AttributeError): - print(Err("x").__dict__) - - def test_Result__satisfies_ResultMethods() -> None: ok = Ok(1) err = Err("x") From 96c4e05f565db8f326c8d56d64f9d26665d701ce Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Tue, 4 Feb 2025 04:29:23 +0000 Subject: [PATCH 50/52] chore: enforce typing import ban with lint rule We now use ruff/flake8-tidy-imports to ban importing typing/typing_extensions. Unfortunately it doesn't seem to support banning everything except allow-listed items, so we have to ignore places that need to import Literal and overload from typing (due to ruff mis-handling them when they're imported from our typing module). 
--- pyproject.toml | 5 +++++ src/denokv/_kv_writes.py | 4 ++-- src/denokv/_pycompat/dataclasses.py | 6 ++++-- src/denokv/_pycompat/protobuf.py | 2 +- src/denokv/_pycompat/types.py | 2 +- src/denokv/_pycompat/typing.py | 2 ++ src/denokv/backoff.py | 2 +- src/denokv/datapath.py | 2 +- src/denokv/kv.py | 4 ++-- src/denokv/kv_keys.py | 2 +- src/denokv/result.py | 2 +- stubs/fdb/tuple.pyi | 2 ++ test/denokv_testing.py | 6 +++--- test/test__kv_writes__KvNumber.py | 2 +- test/test__kv_writes__Max.py | 2 +- test/test__kv_writes__Min.py | 2 +- test/test__kv_writes__Sum.py | 2 +- test/test_datapath.py | 2 +- test/test_kv.py | 2 +- test/test_kv_keys__kvkey.py | 2 +- test/test_kv_keys__kvkeyrange.py | 2 +- test/test_result.py | 2 +- 22 files changed, 35 insertions(+), 24 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 942641f..b1454b8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -87,6 +87,7 @@ select = [ "FA", # flake8-future-annotations "PYI", # flake8-pyi "I", + "TID", # flake8-tidy-imports ] ignore = [ @@ -102,6 +103,10 @@ ignore = [ "PYI041", ] +[tool.ruff.lint.flake8-tidy-imports.banned-api] +"typing_extensions".msg = "use denokv._pycompat.typing instead, typing_extensions is not a runtime dependency." +"typing".msg = "Use denokv._pycompat.typing instead (apart from overload and Literal), using typing is error-prone as it has many differences between python versions." 
+ [tool.ruff.lint.pydocstyle] convention = "numpy" diff --git a/src/denokv/_kv_writes.py b/src/denokv/_kv_writes.py index 1f3d02d..4defc53 100644 --- a/src/denokv/_kv_writes.py +++ b/src/denokv/_kv_writes.py @@ -9,8 +9,8 @@ from functools import total_ordering from itertools import islice from types import MappingProxyType -from typing import Literal -from typing import overload +from typing import Literal # noqa: TID251 +from typing import overload # noqa: TID251 from v8serialize import Encoder from v8serialize.constants import FLOAT64_SAFE_INT_RANGE diff --git a/src/denokv/_pycompat/dataclasses.py b/src/denokv/_pycompat/dataclasses.py index 387cf8b..cede4a0 100644 --- a/src/denokv/_pycompat/dataclasses.py +++ b/src/denokv/_pycompat/dataclasses.py @@ -4,8 +4,10 @@ from dataclasses import FrozenInstanceError from dataclasses import dataclass from dataclasses import fields as dataclass_fields -from typing import Literal -from typing import TypedDict # avoid circular reference with _pycompat.typing +from typing import Literal # noqa: TID251 + +# avoid circular reference with _pycompat.typing +from typing import TypedDict # noqa: TID251 class NoArg(TypedDict): diff --git a/src/denokv/_pycompat/protobuf.py b/src/denokv/_pycompat/protobuf.py index 4b3afbb..cd11027 100644 --- a/src/denokv/_pycompat/protobuf.py +++ b/src/denokv/_pycompat/protobuf.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import overload +from typing import overload # noqa: TID251 from denokv._datapath_pb2 import AtomicWriteStatus from denokv._datapath_pb2 import MutationType diff --git a/src/denokv/_pycompat/types.py b/src/denokv/_pycompat/types.py index 355f359..e60a52f 100644 --- a/src/denokv/_pycompat/types.py +++ b/src/denokv/_pycompat/types.py @@ -1,5 +1,5 @@ from enum import Enum -from typing import Literal +from typing import Literal # noqa: TID251 from denokv._pycompat.typing import TypeAlias diff --git a/src/denokv/_pycompat/typing.py b/src/denokv/_pycompat/typing.py 
index 3d81df6..bf8941f 100644 --- a/src/denokv/_pycompat/typing.py +++ b/src/denokv/_pycompat/typing.py @@ -6,6 +6,8 @@ without needing if TYPE_CHECKING everywhere. """ +# ruff: noqa: TID251 + from __future__ import annotations from dataclasses import dataclass diff --git a/src/denokv/backoff.py b/src/denokv/backoff.py index 61380f8..fae62c5 100644 --- a/src/denokv/backoff.py +++ b/src/denokv/backoff.py @@ -6,7 +6,7 @@ from dataclasses import dataclass from enum import IntEnum from itertools import count -from typing import Literal +from typing import Literal # noqa: TID251 from denokv._pycompat.typing import Callable from denokv._pycompat.typing import Iterable diff --git a/src/denokv/datapath.py b/src/denokv/datapath.py index d444cc6..c868697 100644 --- a/src/denokv/datapath.py +++ b/src/denokv/datapath.py @@ -9,7 +9,7 @@ from dataclasses import dataclass from enum import Enum from enum import auto -from typing import overload +from typing import overload # noqa: TID251 import aiohttp import aiohttp.client_exceptions diff --git a/src/denokv/kv.py b/src/denokv/kv.py index 31f4ec7..3a1866d 100644 --- a/src/denokv/kv.py +++ b/src/denokv/kv.py @@ -12,8 +12,8 @@ from functools import partial from os import environ from types import TracebackType -from typing import Literal -from typing import overload +from typing import Literal # noqa: TID251 +from typing import overload # noqa: TID251 import aiohttp import v8serialize diff --git a/src/denokv/kv_keys.py b/src/denokv/kv_keys.py index c39c996..a9736fe 100644 --- a/src/denokv/kv_keys.py +++ b/src/denokv/kv_keys.py @@ -4,7 +4,7 @@ import sys from dataclasses import dataclass from dataclasses import field -from typing import overload +from typing import overload # noqa: TID251 from fdb.tuple import pack from fdb.tuple import unpack diff --git a/src/denokv/result.py b/src/denokv/result.py index a387d27..d88b2a4 100644 --- a/src/denokv/result.py +++ b/src/denokv/result.py @@ -2,7 +2,7 @@ from abc import ABCMeta from 
dataclasses import dataclass -from typing import overload +from typing import overload # noqa: TID251 from denokv._pycompat.dataclasses import slots_if310 from denokv._pycompat.typing import TYPE_CHECKING diff --git a/stubs/fdb/tuple.pyi b/stubs/fdb/tuple.pyi index ba32142..c211cca 100644 --- a/stubs/fdb/tuple.pyi +++ b/stubs/fdb/tuple.pyi @@ -1,3 +1,5 @@ +# ruff: noqa: TID251 + import ctypes from typing import Hashable from uuid import UUID diff --git a/test/denokv_testing.py b/test/denokv_testing.py index 4925e97..f388ffc 100644 --- a/test/denokv_testing.py +++ b/test/denokv_testing.py @@ -12,9 +12,8 @@ from datetime import datetime from datetime import timedelta from itertools import groupby -from typing import Literal -from typing import Never -from typing import overload +from typing import Literal # noqa: TID251 +from typing import overload # noqa: TID251 from unittest.mock import Mock from uuid import UUID @@ -59,6 +58,7 @@ from denokv._pycompat.typing import Iterable from denokv._pycompat.typing import Mapping from denokv._pycompat.typing import NamedTuple +from denokv._pycompat.typing import Never from denokv._pycompat.typing import Sequence from denokv._pycompat.typing import TypeIs from denokv._pycompat.typing import TypeVar diff --git a/test/test__kv_writes__KvNumber.py b/test/test__kv_writes__KvNumber.py index 13c3817..ae4b8c1 100644 --- a/test/test__kv_writes__KvNumber.py +++ b/test/test__kv_writes__KvNumber.py @@ -1,7 +1,7 @@ from __future__ import annotations from dataclasses import FrozenInstanceError -from typing import Literal +from typing import Literal # noqa: TID251 import pytest from v8serialize.jstypes import JSBigInt diff --git a/test/test__kv_writes__Max.py b/test/test__kv_writes__Max.py index 1767820..8256f21 100644 --- a/test/test__kv_writes__Max.py +++ b/test/test__kv_writes__Max.py @@ -1,5 +1,5 @@ import builtins -from typing import Literal +from typing import Literal # noqa: TID251 import pytest from v8serialize import Encoder diff 
--git a/test/test__kv_writes__Min.py b/test/test__kv_writes__Min.py index d039e24..989cf7c 100644 --- a/test/test__kv_writes__Min.py +++ b/test/test__kv_writes__Min.py @@ -1,5 +1,5 @@ import builtins -from typing import Literal +from typing import Literal # noqa: TID251 import pytest from v8serialize import Encoder diff --git a/test/test__kv_writes__Sum.py b/test/test__kv_writes__Sum.py index d434daf..43ba371 100644 --- a/test/test__kv_writes__Sum.py +++ b/test/test__kv_writes__Sum.py @@ -4,7 +4,7 @@ from datetime import datetime from decimal import Decimal from math import isnan -from typing import Literal +from typing import Literal # noqa: TID251 import pytest from hypothesis import example diff --git a/test/test_datapath.py b/test/test_datapath.py index ceb71f8..00994fd 100644 --- a/test/test_datapath.py +++ b/test/test_datapath.py @@ -3,7 +3,7 @@ import functools import re import struct -from typing import Literal +from typing import Literal # noqa: TID251 import pytest import pytest_asyncio diff --git a/test/test_kv.py b/test/test_kv.py index d334b4a..465c56f 100644 --- a/test/test_kv.py +++ b/test/test_kv.py @@ -10,7 +10,7 @@ from datetime import timedelta from functools import partial from itertools import repeat -from typing import Literal +from typing import Literal # noqa: TID251 from unittest.mock import AsyncMock from unittest.mock import Mock from unittest.mock import patch diff --git a/test/test_kv_keys__kvkey.py b/test/test_kv_keys__kvkey.py index 8195081..5115d3f 100644 --- a/test/test_kv_keys__kvkey.py +++ b/test/test_kv_keys__kvkey.py @@ -2,7 +2,7 @@ import re import weakref -from typing import Literal +from typing import Literal # noqa: TID251 import pytest from fdb.tuple import pack diff --git a/test/test_kv_keys__kvkeyrange.py b/test/test_kv_keys__kvkeyrange.py index 7a64a47..f2fd3d6 100644 --- a/test/test_kv_keys__kvkeyrange.py +++ b/test/test_kv_keys__kvkeyrange.py @@ -1,4 +1,4 @@ -from typing import Literal +from typing import Literal # 
noqa: TID251 import pytest diff --git a/test/test_result.py b/test/test_result.py index 1b1c4c1..3d65699 100644 --- a/test/test_result.py +++ b/test/test_result.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Literal +from typing import Literal # noqa: TID251 from unittest.mock import Mock import pytest From d07120c78f025a9ceddcc500cfc93fbfc85c6306 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Tue, 4 Feb 2025 05:45:31 +0000 Subject: [PATCH 51/52] style: auto-format markdown files We now configure prettier to wrap markdown automatically. --- .prettierrc | 3 ++- CHANGELOG.md | 6 ++++-- README.md | 11 +++++++---- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/.prettierrc b/.prettierrc index 222861c..e659e61 100644 --- a/.prettierrc +++ b/.prettierrc @@ -1,4 +1,5 @@ { "tabWidth": 2, - "useTabs": false + "useTabs": false, + "proseWrap": "always" } diff --git a/CHANGELOG.md b/CHANGELOG.md index a424a03..2c506eb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,12 +8,14 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). ### Added -- Support closing `Kv` client's `session` via: ([#20](https://github.com/h4l/denokv-python/pull/20)) +- Support closing `Kv` client's `session` via: + ([#20](https://github.com/h4l/denokv-python/pull/20)) - `Kv.aclose()` - async context manager - At interpreter exit / garbage collection via `Kv.create_finalizer()` - Automatically when an interactive console exists: - - `Kv` objects created by `open_kv()` from an interactive console/REPL automatically close at exit. + - `Kv` objects created by `open_kv()` from an interactive console/REPL + automatically close at exit. - The `open_kv()` function has a `finalize` option that controls this.
[unreleased]: https://github.com/h4l/denokv-python/commits/main/ diff --git a/README.md b/README.md index f5d73f2..3350bcb 100644 --- a/README.md +++ b/README.md @@ -6,14 +6,17 @@ _Connect to [Deno KV] cloud and [self-hosted] databases from Python._ [self-hosted]: https://deno.com/blog/kv-is-open-source-with-continuous-backup [denokv server]: https://github.com/denoland/denokv -The `denokv` package is an unofficial Python client for the Deno KV database. It can connect to -both the distributed cloud KV service, or self-hosted [denokv server] (which can be a replica of a cloud KV database, or standalone). +The `denokv` package is an unofficial Python client for the Deno KV database. It +can connect to both the distributed cloud KV service, or self-hosted [denokv +server] (which can be a replica of a cloud KV database, or standalone). -It implements version 3 of the [KV Connect protocol spec, published by Deno](https://github.com/denoland/denokv/blob/main/proto/kv-connect.md). +It implements version 3 of the +[KV Connect protocol spec, published by Deno](https://github.com/denoland/denokv/blob/main/proto/kv-connect.md). ## Status -The package is under active development and is not yet stable or feature-complete. +The package is under active development and is not yet stable or +feature-complete. **Working**: From 9e18b911190915fd2c33ebd3700e5c10ae307cd3 Mon Sep 17 00:00:00 2001 From: Hal Blackburn Date: Tue, 4 Feb 2025 05:45:51 +0000 Subject: [PATCH 52/52] docs: update README and CHANGELOG for write support --- CHANGELOG.md | 8 ++++++++ README.md | 11 +++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2c506eb..2618018 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,14 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). ### Added +- Support writing to KV databases with `Kv.set()`, `Kv.delete()`, `Kv.sum()`, + `Kv.min()`, `Kv.max()`, `Kv.enqueue()` and `Kv.check()`. 
+ ([#16](https://github.com/h4l/denokv-python/pull/16)) + + - These methods are available on `Kv` itself for one-off operations, and + `Kv.atomic()` can chain these methods to group write operations to apply + together in a transaction. + - Support closing `Kv` client's `session` via: ([#20](https://github.com/h4l/denokv-python/pull/20)) - `Kv.aclose()` diff --git a/README.md b/README.md index 3350bcb..7106e1b 100644 --- a/README.md +++ b/README.md @@ -20,10 +20,17 @@ feature-complete. **Working**: -- [x] Reading data with kv.get(), kv.list() +- [x] Reading data with `Kv.get()`, `Kv.list()` + - The read APIs are being reworked to improve ergonomics and functionality +- [x] Writing data with `Kv.set()`, `Kv.delete()`, `Kv.sum()`, `Kv.min()`, + `Kv.max()`, `Kv.enqueue()` and `Kv.check()`. + - These methods are available on `Kv` itself for one-off operations, and + `Kv.atomic()` can chain these methods to group write operations to apply + together in a transaction. **To-do**: -- [ ] [Writing data / transactions](https://docs.deno.com/deploy/kv/manual/transactions/) - [ ] [Watching for changes](https://docs.deno.com/deploy/kv/manual/operations/#watch) - [ ] [Queues](https://deno.com/blog/queues) + - This is uncertain: The KV Connect protocol does not support Queues, but they + could be implemented using watching in theory.