From fabe3f01b7cc3ce26b0e461814335b108e104336 Mon Sep 17 00:00:00 2001 From: Kelly Brazil Date: Wed, 4 Jan 2023 19:00:49 -0800 Subject: [PATCH] remove type annotations for python 3.6 compatibility --- jc/parsers/tomli/_parser.py | 102 ++++++++++++++++++------------------ jc/parsers/tomli/_re.py | 12 ++--- jc/parsers/tomli/_types.py | 8 ++- 3 files changed, 57 insertions(+), 65 deletions(-) diff --git a/jc/parsers/tomli/_parser.py b/jc/parsers/tomli/_parser.py index f1bb0aa1..f3bb95f7 100644 --- a/jc/parsers/tomli/_parser.py +++ b/jc/parsers/tomli/_parser.py @@ -2,7 +2,7 @@ # SPDX-FileCopyrightText: 2021 Taneli Hukkinen # Licensed to PSF under a Contributor Agreement. -from __future__ import annotations +# from __future__ import annotations from collections.abc import Iterable import string @@ -17,7 +17,7 @@ from ._re import ( match_to_localtime, match_to_number, ) -from ._types import Key, ParseFloat, Pos +# from ._types import Key, ParseFloat, Pos ASCII_CTRL = frozenset(chr(i) for i in range(32)) | frozenset(chr(127)) @@ -54,7 +54,7 @@ class TOMLDecodeError(ValueError): """An error raised if a document is not valid TOML.""" -def load(__fp: BinaryIO, *, parse_float: ParseFloat = float) -> dict[str, Any]: +def load(__fp, *, parse_float=float): """Parse TOML from a binary file object.""" b = __fp.read() try: @@ -66,7 +66,7 @@ def load(__fp: BinaryIO, *, parse_float: ParseFloat = float) -> dict[str, Any]: return loads(s, parse_float=parse_float) -def loads(__s: str, *, parse_float: ParseFloat = float) -> dict[str, Any]: # noqa: C901 +def loads(__s, *, parse_float=float): """Parse TOML from a string.""" # The spec allows converting "\r\n" to "\n", even in string @@ -74,7 +74,7 @@ def loads(__s: str, *, parse_float: ParseFloat = float) -> dict[str, Any]: # no src = __s.replace("\r\n", "\n") pos = 0 out = Output(NestedDict(), Flags()) - header: Key = () + header = () parse_float = make_safe_parse_float(parse_float) # Parse one statement at a time @@ -103,7 +103,7 @@ def loads(__s: str, *, parse_float: ParseFloat = float) -> dict[str, Any]: # no pos = skip_chars(src, pos, TOML_WS) elif char == "[": try: - second_char: str | None = src[pos + 1] + second_char = src[pos + 1] except IndexError: second_char = None out.flags.finalize_pending() @@ -141,19 +141,19 @@ class Flags: # be opened using the "[table]" syntax. 
EXPLICIT_NEST = 1 - def __init__(self) -> None: - self._flags: dict[str, dict] = {} - self._pending_flags: set[tuple[Key, int]] = set() + def __init__(self): + self._flags = {} + self._pending_flags = set() - def add_pending(self, key: Key, flag: int) -> None: + def add_pending(self, key, flag): self._pending_flags.add((key, flag)) - def finalize_pending(self) -> None: + def finalize_pending(self): for key, flag in self._pending_flags: self.set(key, flag, recursive=False) self._pending_flags.clear() - def unset_all(self, key: Key) -> None: + def unset_all(self, key): cont = self._flags for k in key[:-1]: if k not in cont: @@ -161,7 +161,7 @@ class Flags: cont = cont[k]["nested"] cont.pop(key[-1], None) - def set(self, key: Key, flag: int, *, recursive: bool) -> None: # noqa: A003 + def set(self, key, flag, *, recursive): cont = self._flags key_parent, key_stem = key[:-1], key[-1] for k in key_parent: @@ -172,7 +172,7 @@ class Flags: cont[key_stem] = {"flags": set(), "recursive_flags": set(), "nested": {}} cont[key_stem]["recursive_flags" if recursive else "flags"].add(flag) - def is_(self, key: Key, flag: int) -> bool: + def is_(self, key, flag): if not key: return False # document root has no flags cont = self._flags @@ -197,7 +197,7 @@ class NestedDict: def get_or_create_nest( self, - key: Key, + key, *, access_lists: bool = True, ) -> dict: @@ -212,7 +212,7 @@ class NestedDict: raise KeyError("There is no nest behind this key") return cont - def append_nest_to_list(self, key: Key) -> None: + def append_nest_to_list(self, key): cont = self.get_or_create_nest(key[:-1]) last_key = key[-1] if last_key in cont: @@ -229,7 +229,7 @@ class Output(NamedTuple): flags: Flags -def skip_chars(src: str, pos: Pos, chars: Iterable[str]) -> Pos: +def skip_chars(src: str, pos, chars): try: while src[pos] in chars: pos += 1 @@ -240,12 +240,12 @@ def skip_chars(src: str, pos: Pos, chars: Iterable[str]) -> Pos: def skip_until( src: str, - pos: Pos, + pos, expect: str, *, error_on: frozenset[str], error_on_eof: bool, -) -> Pos: +): try: new_pos = src.index(expect, pos) except ValueError: @@ -260,9 +260,9 @@ def skip_until( return new_pos -def skip_comment(src: str, pos: Pos) -> Pos: +def skip_comment(src: str, pos): try: - char: str | None = src[pos] + char = src[pos] except IndexError: char = None if char == "#": @@ -272,7 +272,7 @@ def skip_comment(src: str, pos: Pos) -> Pos: return pos -def skip_comments_and_array_ws(src: str, pos: Pos) -> Pos: +def skip_comments_and_array_ws(src, pos): while True: pos_before_skip = pos pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE) @@ -281,7 +281,7 @@ def skip_comments_and_array_ws(src: str, pos: Pos) -> Pos: return pos -def create_dict_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]: +def create_dict_rule(src, pos, out): pos += 1 # Skip "[" pos = skip_chars(src, pos, TOML_WS) pos, key = parse_key(src, pos) @@ -299,7 +299,7 @@ def create_dict_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]: return pos + 1, key -def create_list_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]: +def create_list_rule(src: str, pos, out): pos += 2 # Skip "[[" pos = skip_chars(src, pos, TOML_WS) pos, key = parse_key(src, pos) @@ -321,8 +321,8 @@ def create_list_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]: def key_value_rule( - src: str, pos: Pos, out: Output, header: Key, parse_float: ParseFloat -) -> Pos: + src: str, pos, out, header, parse_float +): pos, key, value = parse_key_value_pair(src, pos, parse_float) key_parent, key_stem = key[:-1], key[-1] 
abs_key_parent = header + key_parent @@ -355,11 +355,11 @@ def key_value_rule( def parse_key_value_pair( - src: str, pos: Pos, parse_float: ParseFloat -) -> tuple[Pos, Key, Any]: + src: str, pos, parse_float +): pos, key = parse_key(src, pos) try: - char: str | None = src[pos] + char = src[pos] except IndexError: char = None if char != "=": @@ -370,13 +370,13 @@ def parse_key_value_pair( return pos, key, value -def parse_key(src: str, pos: Pos) -> tuple[Pos, Key]: +def parse_key(src, pos): pos, key_part = parse_key_part(src, pos) - key: Key = (key_part,) + key = (key_part,) pos = skip_chars(src, pos, TOML_WS) while True: try: - char: str | None = src[pos] + char = src[pos] except IndexError: char = None if char != ".": @@ -388,9 +388,9 @@ def parse_key(src: str, pos: Pos) -> tuple[Pos, Key]: pos = skip_chars(src, pos, TOML_WS) -def parse_key_part(src: str, pos: Pos) -> tuple[Pos, str]: +def parse_key_part(src, pos): try: - char: str | None = src[pos] + char = src[pos] except IndexError: char = None if char in BARE_KEY_CHARS: @@ -404,12 +404,12 @@ def parse_key_part(src: str, pos: Pos) -> tuple[Pos, str]: raise suffixed_err(src, pos, "Invalid initial character for a key part") -def parse_one_line_basic_str(src: str, pos: Pos) -> tuple[Pos, str]: +def parse_one_line_basic_str(src, pos): pos += 1 return parse_basic_str(src, pos, multiline=False) -def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, list]: +def parse_array(src, pos, parse_float): pos += 1 array: list = [] @@ -433,7 +433,7 @@ def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, list] return pos + 1, array -def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, dict]: +def parse_inline_table(src, pos, parse_float): pos += 1 nested_dict = NestedDict() flags = Flags() @@ -466,8 +466,8 @@ def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos def parse_basic_str_escape( - src: str, pos: Pos, *, multiline: bool = False -) -> tuple[Pos, str]: + src, pos, *, multiline = False +): escape_id = src[pos : pos + 2] pos += 2 if multiline and escape_id in {"\\ ", "\\\t", "\\\n"}: @@ -494,11 +494,11 @@ def parse_basic_str_escape( raise suffixed_err(src, pos, "Unescaped '\\' in a string") from None -def parse_basic_str_escape_multiline(src: str, pos: Pos) -> tuple[Pos, str]: +def parse_basic_str_escape_multiline(src, pos): return parse_basic_str_escape(src, pos, multiline=True) -def parse_hex_char(src: str, pos: Pos, hex_len: int) -> tuple[Pos, str]: +def parse_hex_char(src, pos, hex_len): hex_str = src[pos : pos + hex_len] if len(hex_str) != hex_len or not HEXDIGIT_CHARS.issuperset(hex_str): raise suffixed_err(src, pos, "Invalid hex value") @@ -509,7 +509,7 @@ def parse_hex_char(src: str, pos: Pos, hex_len: int) -> tuple[Pos, str]: return pos, chr(hex_int) -def parse_literal_str(src: str, pos: Pos) -> tuple[Pos, str]: +def parse_literal_str(src, pos): pos += 1 # Skip starting apostrophe start_pos = pos pos = skip_until( @@ -518,7 +518,7 @@ def parse_literal_str(src: str, pos: Pos) -> tuple[Pos, str]: return pos + 1, src[start_pos:pos] # Skip ending apostrophe -def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> tuple[Pos, str]: +def parse_multiline_str(src, pos, *, literal): pos += 3 if src.startswith("\n", pos): pos += 1 @@ -549,7 +549,7 @@ def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> tuple[Pos, str] return pos, result + (delim * 2) -def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> tuple[Pos, str]: 
+def parse_basic_str(src, pos, *, multiline): if multiline: error_on = ILLEGAL_MULTILINE_BASIC_STR_CHARS parse_escapes = parse_basic_str_escape_multiline @@ -582,10 +582,10 @@ def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> tuple[Pos, str]: def parse_value( # noqa: C901 - src: str, pos: Pos, parse_float: ParseFloat -) -> tuple[Pos, Any]: + src, pos, parse_float +): try: - char: str | None = src[pos] + char = src[pos] except IndexError: char = None @@ -649,11 +649,11 @@ def parse_value( # noqa: C901 raise suffixed_err(src, pos, "Invalid value") -def suffixed_err(src: str, pos: Pos, msg: str) -> TOMLDecodeError: +def suffixed_err(src, pos, msg): """Return a `TOMLDecodeError` where error message is suffixed with coordinates in source.""" - def coord_repr(src: str, pos: Pos) -> str: + def coord_repr(src, pos): if pos >= len(src): return "end of document" line = src.count("\n", 0, pos) + 1 @@ -670,7 +670,7 @@ def is_unicode_scalar_value(codepoint: int) -> bool: return (0 <= codepoint <= 55295) or (57344 <= codepoint <= 1114111) -def make_safe_parse_float(parse_float: ParseFloat) -> ParseFloat: +def make_safe_parse_float(parse_float): """A decorator to make `parse_float` safe. `parse_float` must not return dicts or lists, because these types @@ -679,7 +679,7 @@ def make_safe_parse_float(parse_float: ParseFloat) -> ParseFloat: instead of returning illegal types. """ # The default `float` callable never returns illegal types. Optimize it. - if parse_float is float: # type: ignore[comparison-overlap] + if parse_float is float: return float def safe_parse_float(float_str: str) -> Any: diff --git a/jc/parsers/tomli/_re.py b/jc/parsers/tomli/_re.py index 994bb749..96952fcb 100644 --- a/jc/parsers/tomli/_re.py +++ b/jc/parsers/tomli/_re.py @@ -1,15 +1,9 @@ # SPDX-License-Identifier: MIT # SPDX-FileCopyrightText: 2021 Taneli Hukkinen # Licensed to PSF under a Contributor Agreement. - -from __future__ import annotations - from datetime import date, datetime, time, timedelta, timezone, tzinfo from functools import lru_cache import re -from typing import Any - -from ._types import ParseFloat # E.g. # - 00:32:00.999999 @@ -49,7 +43,7 @@ RE_DATETIME = re.compile( ) -def match_to_datetime(match: re.Match) -> datetime | date: +def match_to_datetime(match): """Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`. Raises ValueError if the match does not correspond to a valid date @@ -74,7 +68,7 @@ def match_to_datetime(match: re.Match) -> datetime | date: hour, minute, sec = int(hour_str), int(minute_str), int(sec_str) micros = int(micros_str.ljust(6, "0")) if micros_str else 0 if offset_sign_str: - tz: tzinfo | None = cached_tz( + tz = cached_tz( offset_hour_str, offset_minute_str, offset_sign_str ) elif zulu_time: @@ -101,7 +95,7 @@ def match_to_localtime(match: re.Match) -> time: return time(int(hour_str), int(minute_str), int(sec_str), micros) -def match_to_number(match: re.Match, parse_float: ParseFloat) -> Any: +def match_to_number(match, parse_float): if match.group("floatpart"): return parse_float(match.group()) return int(match.group(), 0) diff --git a/jc/parsers/tomli/_types.py b/jc/parsers/tomli/_types.py index d949412e..30eb6a0b 100644 --- a/jc/parsers/tomli/_types.py +++ b/jc/parsers/tomli/_types.py @@ -2,9 +2,7 @@ # SPDX-FileCopyrightText: 2021 Taneli Hukkinen # Licensed to PSF under a Contributor Agreement. -from typing import Any, Callable, Tuple - # Type annotations -ParseFloat = Callable[[str], Any] -Key = Tuple[str, ...] 
-Pos = int +# ParseFloat = Callable[[str], Any] +# Key = Tuple[str, ...] +# Pos = int
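
Editorial note, not part of the patch: the driver for this change is annotation syntax that will not import on Python 3.6. "from __future__ import annotations" (PEP 563, postponed evaluation) only exists from Python 3.7, and without it the builtin-generic and union syntax used in the signatures is evaluated the moment each def statement runs. A minimal sketch of the failure modes, using hypothetical function names:

# Illustration only -- not part of the vendored module.

# On Python 3.6 the future import is itself a SyntaxError
# ("future feature annotations is not defined"); it was added in 3.7:
#     from __future__ import annotations

# Without that import, signature and return annotations are evaluated at
# function-definition time:

def load_example(fp) -> dict[str, object]:  # TypeError before 3.9 (PEP 585 builtin generics)
    return {}

def char_example(src, pos) -> str | None:   # TypeError before 3.10 (PEP 604 unions)
    return src[pos] if pos < len(src) else None

# Local-variable annotations such as `char: str | None = src[pos]` are never
# evaluated at runtime (PEP 526), but the patch strips those as well so the
# vendored files carry no annotation style newer than what 3.6 understands.

The typing-module spellings (Dict[str, Any], Optional[str], Tuple[str, ...]) would have worked on 3.6, but the patch takes the simpler route of dropping the annotations and commenting out the now-unused _types aliases and their imports.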
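Since the change is purely syntactic, runtime behaviour should be unchanged. A hypothetical smoke test, assuming the jc/parsers/tomli layout shown in the diff is importable as a package, might look like:

# Hypothetical smoke test -- not part of the patch. Assumes jc is on sys.path
# with the package layout shown in the diff.
from jc.parsers.tomli._parser import TOMLDecodeError, loads

doc = """
title = "example"

[server]
host = "127.0.0.1"
ports = [ 8001, 8002 ]
"""

# Valid TOML should still parse into nested dicts and lists.
data = loads(doc)
assert data["title"] == "example"
assert data["server"]["ports"] == [8001, 8002]

# Invalid input should still surface as TOMLDecodeError, not as a TypeError
# raised while evaluating an annotation.
try:
    loads("not [ valid toml")
except TOMLDecodeError as exc:
    print("rejected invalid document:", exc)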