diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index 57fc88ec5..000000000 --- a/.coveragerc +++ /dev/null @@ -1,40 +0,0 @@ -[run] -source = . -branch = true -parallel = true -omit = - */.tox/* - */__main__.py - */setup.py - */venv*/* - # TODO: separate the tests from the test data - testsuite/E*.py - testsuite/W*.py - testsuite/latin-1.py - testsuite/noqa.py - testsuite/python*.py - testsuite/utf-8-bom.py - -[report] -show_missing = True -skip_covered = True -# TODO: increase this -fail_under = 90 -exclude_lines = - # a more strict default pragma - \# pragma: no cover\b - - # allow defensive code - ^\s*raise AssertionError\b - ^\s*raise NotImplementedError\b - ^\s*return NotImplemented\b - ^\s*raise$ - - # typing-related code - ^if (False|TYPE_CHECKING): - : \.\.\.$ - ^ +\.\.\.$ - -> ['"]?NoReturn['"]?: - - # non-runnable code - if __name__ == ['"]__main__['"]:$ diff --git a/.gitattributes b/.gitattributes index 0aadd30bd..e1d5e1b97 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1 +1 @@ -testsuite/E90.py -text +testing/data/E90.py -text diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 0d26707db..75fc886a0 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -11,34 +11,42 @@ jobs: matrix: include: - os: windows-latest - py: 3.9 + py: '3.10' toxenv: py - os: ubuntu-latest - py: pypy3 + py: pypy3.10 toxenv: py - os: ubuntu-latest - py: 3.6 + py: '3.10' toxenv: py - os: ubuntu-latest - py: 3.7 + py: '3.11' toxenv: py - os: ubuntu-latest - py: 3.8 + py: '3.12' toxenv: py - os: ubuntu-latest - py: 3.9 + py: '3.13' toxenv: py - os: ubuntu-latest - py: 3.10-dev + py: '3.14' toxenv: py - os: ubuntu-latest - py: 3.9 + py: '3.15-dev' + toxenv: py + - os: ubuntu-latest + py: '3.10' toxenv: flake8 runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.py }} + if: 
matrix.py != '3.15-dev' - uses: deadsnakes/action@v3.0.1 with: python-version: ${{ matrix.py }} + if: matrix.py == '3.15-dev' - run: pip install tox - run: tox -e ${{ matrix.toxenv }} diff --git a/.gitignore b/.gitignore index a28481737..ba74c72f3 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,7 @@ *.egg *.egg-info *.pyc -/.coverage +/.coverage* /.tox /build/ /dist diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index fea421651..a1ea107d8 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,23 +1,27 @@ -exclude: ^testsuite/ +exclude: ^testing/data/ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.2.0 + rev: v6.0.0 hooks: - id: check-yaml - id: debug-statements - id: end-of-file-fixer - id: trailing-whitespace -- repo: https://github.com/asottile/reorder_python_imports - rev: v3.0.1 +- repo: https://github.com/asottile/reorder-python-imports + rev: v3.16.0 hooks: - id: reorder-python-imports - args: [--py36-plus] + args: [--py310-plus] - repo: https://github.com/asottile/pyupgrade - rev: v2.32.0 + rev: v3.21.2 hooks: - id: pyupgrade - args: [--py36-plus] + args: [--py310-plus] +- repo: https://github.com/asottile/setup-cfg-fmt + rev: v3.2.0 + hooks: + - id: setup-cfg-fmt - repo: https://github.com/pycqa/flake8 - rev: 4.0.1 + rev: 7.3.0 hooks: - id: flake8 diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 000000000..87bb5a6a3 --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,8 @@ +version: 2 + +build: + os: ubuntu-22.04 + tools: + python: "3.11" +sphinx: + configuration: docs/conf.py diff --git a/CHANGES.txt b/CHANGES.txt index 49db29b21..aef354853 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,6 +1,93 @@ Changelog ========= +2.14.0 (2025-06-20) +------------------- + +Changes: + +* Add support for python 3.14. PR #1283. +* Fix false positive for TypeVar defaults with more than one argument. + PR #1286. 
+ +2.13.0 (2025-03-29) +------------------- + +Changes: + +* Improve performance. PR #1254. PR #1255. +* Drop EOL python 3.8. PR #1267. +* E251: fix false positive for PEP 696 defaults. PR #1278. PR #1279. + +2.12.1 (2024-08-04) +------------------- + +Changes: + +* Properly preserve escaped `{` and `}` in fstrings in logical lines in 3.12+. + PR #1252. + +2.12.0 (2024-06-15) +------------------- + +Changes: + +* E721: Fix false positive of the form `x.type(...) ==`. PR #1228. +* E502: Fix false-negative with a backslash escape in a comment. PR #1234. +* E204: New lint forbidding whitespace after decorator `@`. PR #1247. + +2.11.1 (2023-10-12) +------------------- + +Changes: + +* E275: fix false positive with fstrings containing keyword parts in python 3.12 + +2.11.0 (2023-07-29) +------------------- + +Changes: + +* Drop EOL python 3.6 / 3.7. PR #1129, #1160. +* Add support for python 3.12. PR #1147, #1148, #1152, #1153, #1154, #1163, + #1164, #1165, #1166, #1176, #1177, #1182. +* E721: adjust handling of type comparison. Allowed forms are now + ``isinstance(x, t)`` or ``type(x) is t``. PR #1086, #1167. +* Remove handling of python 2 ``<>`` operator. PR #1161. +* W606: removed. ``async`` / ``await`` are always keywords. PR #1162. +* Internal: move tests to pytest. PR #1168, #1169, #1171, #1173, #1174, #1175. +* Remove handling of python 2 ``ur''`` strings. PR #1181. + + +2.10.0 (2022-11-23) +------------------- + +Changes: + +* E231: allow trailing comma inside 1-tuples in ``[]``. PR #1108. +* W601, W602, W603, W604: removed (no longer relevant in python 3). PR #1111. +* E741: also apply to lambdas. PR #1106. +* E741: fix false positive for comparison operators. PR #1118. + +2.9.1 (2022-08-03) +------------------ + +Changes: + +* E275: fix false positive for yield expressions. PR #1091. + +2.9.0 (2022-07-30) +------------------ + +Changes: + +* E221, E222, E223, E224: add support for ``:=`` operator. PR #1032. +* Drop python 2.7 / 3.5. 
+ * E262: consider non-breaking spaces (``\xa0``) as whitespace. PR #1035. + * Improve performance of ``_is_binary_operator``. PR #1052. + * E275: requires whitespace around keywords. PR #1063. + * Add support for python 3.11. PR #1070. + 2.8.0 (2021-10-10) ------------------ diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 5515108ed..e7a79ae31 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -70,32 +70,28 @@ GitHub has an excellent `guide`_. The current tests are written in 2 styles: -* standard xUnit based only on stdlib unittest -* functional test using a custom framework and executed by the - pycodestyle itself when installed in dev mode. +* pytest tests +* functional test using a custom framework -Running unittest -~~~~~~~~~~~~~~~~ +Running tests +~~~~~~~~~~~~~ -The tests are written using stdlib ``unittest`` module, the existing tests +The tests are written using ``pytest``, the existing tests include unit, integration and functional tests. To run the tests:: - $ python setup.py test + $ pytest tests Running functional ~~~~~~~~~~~~~~~~~~ -When installed in dev mode, pycodestyle will have the ``--testsuite`` option -which can be used to run the tests:: - $ pip install -e . $ # Run all tests. - $ pycodestyle --testsuite testsuite + $ pytest tests/test_data.py $ # Run a subset of the tests. - $ pycodestyle --testsuite testsuite/E30.py + $ pytest tests/test_data.py -k testing/data/E30.py .. 
_virtualenv: http://docs.python-guide.org/en/latest/dev/virtualenvs/ diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index fb8bc97dd..000000000 --- a/MANIFEST.in +++ /dev/null @@ -1,10 +0,0 @@ -include *.txt -include *.rst -include LICENSE -recursive-include docs * -recursive-include testsuite * -recursive-exclude docs *.pyc -recursive-exclude docs *.pyo -recursive-exclude testsuite *.pyc -recursive-exclude testsuite *.pyo -prune docs/_build diff --git a/README.rst b/README.rst index c71b933ee..bab8d71cf 100644 --- a/README.rst +++ b/README.rst @@ -65,17 +65,15 @@ Example usage and output optparse.py:69:11: E401 multiple imports on one line optparse.py:77:1: E302 expected 2 blank lines, found 1 optparse.py:88:5: E301 expected 1 blank line, found 0 - optparse.py:222:34: W602 deprecated form of raising exception optparse.py:347:31: E211 whitespace before '(' optparse.py:357:17: E201 whitespace after '{' optparse.py:472:29: E221 multiple spaces before operator - optparse.py:544:21: W601 .has_key() is deprecated, use 'in' You can also make ``pycodestyle.py`` show the source code for each error, and even the relevant text from PEP 8:: - $ pycodestyle --show-source --show-pep8 testsuite/E40.py - testsuite/E40.py:2:10: E401 multiple imports on one line + $ pycodestyle --show-source --show-pep8 testing/data/E40.py + testing/data/E40.py:2:10: E401 multiple imports on one line import os, sys ^ Imports should usually be on separate lines. 
@@ -97,8 +95,6 @@ Or you can display how often each error was found:: 165 E303 too many blank lines (4) 325 E401 multiple imports on one line 3615 E501 line too long (82 characters) - 612 W601 .has_key() is deprecated, use 'in' - 1188 W602 deprecated form of raising exception Links ----- diff --git a/docs/advanced.rst b/docs/advanced.rst index 89700d564..4769e06f6 100644 --- a/docs/advanced.rst +++ b/docs/advanced.rst @@ -29,7 +29,7 @@ There's also a shortcut for checking a single file:: import pycodestyle - fchecker = pycodestyle.Checker('testsuite/E27.py', show_source=True) + fchecker = pycodestyle.Checker('testing/data/E27.py', show_source=True) file_errors = fchecker.check_all() print("Found %s errors (and warnings)" % file_errors) diff --git a/docs/developer.rst b/docs/developer.rst index 74e3ede76..5aaee6204 100644 --- a/docs/developer.rst +++ b/docs/developer.rst @@ -89,16 +89,15 @@ Several docstrings contain examples directly from the `PEP 8`_ document. Okay: spam(ham[1], {eggs: 2}) E201: spam( ham[1], {eggs: 2}) -These examples are verified automatically when ``pycodestyle.py`` is run with -the ``--doctest`` option. You can add examples for your own check functions. +These examples are verified automatically by ``test_self_doctest.py``. +You can add examples for your own check functions. The format is simple: ``"Okay"`` or error/warning code followed by colon and space, the rest of the line is example source code. If you put ``'r'`` before the docstring, you can use ``\n`` for newline and ``\t`` for tab. Then be sure to pass the tests:: - $ python pycodestyle.py --testsuite testsuite - $ python pycodestyle.py --doctest + $ pytest tests $ python pycodestyle.py --verbose pycodestyle.py When contributing to pycodestyle, please observe our `Code of Conduct`_. 
diff --git a/docs/intro.rst b/docs/intro.rst index 2f1081389..a7187a270 100644 --- a/docs/intro.rst +++ b/docs/intro.rst @@ -71,17 +71,15 @@ Example usage and output optparse.py:69:11: E401 multiple imports on one line optparse.py:77:1: E302 expected 2 blank lines, found 1 optparse.py:88:5: E301 expected 1 blank line, found 0 - optparse.py:222:34: W602 deprecated form of raising exception optparse.py:347:31: E211 whitespace before '(' optparse.py:357:17: E201 whitespace after '{' optparse.py:472:29: E221 multiple spaces before operator - optparse.py:544:21: W601 .has_key() is deprecated, use 'in' You can also make ``pycodestyle.py`` show the source code for each error, and even the relevant text from PEP 8:: - $ pycodestyle --show-source --show-pep8 testsuite/E40.py - testsuite/E40.py:2:10: E401 multiple imports on one line + $ pycodestyle --show-source --show-pep8 testing/data/E40.py + testing/data/E40.py:2:10: E401 multiple imports on one line import os, sys ^ Imports should usually be on separate lines. 
@@ -103,20 +101,18 @@ Or you can display how often each error was found:: 165 E303 too many blank lines (4) 325 E401 multiple imports on one line 3615 E501 line too long (82 characters) - 612 W601 .has_key() is deprecated, use 'in' - 1188 W602 deprecated form of raising exception You can also make ``pycodestyle.py`` show the error text in different formats by using ``--format`` having options default/pylint/custom:: - $ pycodestyle testsuite/E40.py --format=default - testsuite/E40.py:2:10: E401 multiple imports on one line + $ pycodestyle testing/data/E40.py --format=default + testing/data/E40.py:2:10: E401 multiple imports on one line - $ pycodestyle testsuite/E40.py --format=pylint - testsuite/E40.py:2: [E401] multiple imports on one line + $ pycodestyle testing/data/E40.py --format=pylint + testing/data/E40.py:2: [E401] multiple imports on one line - $ pycodestyle testsuite/E40.py --format='%(path)s|%(row)d|%(col)d| %(code)s %(text)s' - testsuite/E40.py|2|10| E401 multiple imports on one line + $ pycodestyle testing/data/E40.py --format='%(path)s|%(row)d|%(col)d| %(code)s %(text)s' + testing/data/E40.py|2|10| E401 multiple imports on one line Variables in the ``custom`` format option @@ -200,7 +196,7 @@ Example:: [pycodestyle] count = False - ignore = E226,E302,E41 + ignore = E226,E302,E71 max-line-length = 160 statistics = True @@ -266,6 +262,8 @@ This is the current list of error and warning codes: +------------+----------------------------------------------------------------------+ | E203 | whitespace before ',', ';', or ':' | +------------+----------------------------------------------------------------------+ +| E204 | whitespace after decorator '@' | ++------------+----------------------------------------------------------------------+ +------------+----------------------------------------------------------------------+ | E211 | whitespace before '(' | +------------+----------------------------------------------------------------------+ @@ -415,18 +413,8 
@@ This is the current list of error and warning codes: +------------+----------------------------------------------------------------------+ | **W6** | *Deprecation warning* | +------------+----------------------------------------------------------------------+ -| W601 | .has_key() is deprecated, use 'in' | -+------------+----------------------------------------------------------------------+ -| W602 | deprecated form of raising exception | -+------------+----------------------------------------------------------------------+ -| W603 | '<>' is deprecated, use '!=' | -+------------+----------------------------------------------------------------------+ -| W604 | backticks are deprecated, use 'repr()' | -+------------+----------------------------------------------------------------------+ | W605 | invalid escape sequence '\x' | +------------+----------------------------------------------------------------------+ -| W606 | 'async' and 'await' are reserved keywords starting with Python 3.7 | -+------------+----------------------------------------------------------------------+ **(*)** In the default configuration, the checks **E121**, **E123**, **E126**, **E133**, @@ -447,7 +435,7 @@ special comment. This possibility should be reserved for special cases. Note: most errors can be listed with such one-liner:: - $ python pycodestyle.py --first --select E,W testsuite/ --format '%(code)s: %(text)s' + $ python pycodestyle.py --first --select E,W testing/data --format '%(code)s: %(text)s' .. 
_related-tools: diff --git a/pycodestyle.py b/pycodestyle.py index e2e4b96bb..868e79d5a 100755 --- a/pycodestyle.py +++ b/pycodestyle.py @@ -47,7 +47,9 @@ 900 syntax error """ import bisect +import configparser import inspect +import io import keyword import os import re @@ -57,29 +59,17 @@ import warnings from fnmatch import fnmatch from functools import lru_cache +from itertools import pairwise from optparse import OptionParser -try: - from configparser import RawConfigParser - from io import TextIOWrapper -except ImportError: - from ConfigParser import RawConfigParser - -# this is a performance hack. see https://bugs.python.org/issue43014 -if ( - sys.version_info < (3, 10) and - callable(getattr(tokenize, '_compile', None)) -): # pragma: no cover (>', '**', '*', '+', '-']) ARITHMETIC_OP = frozenset(['**', '*', '/', '//', '+', '-', '@']) WS_OPTIONAL_OPERATORS = ARITHMETIC_OP.union(['^', '&', '|', '<<', '>>', '%']) -ASSIGNMENT_EXPRESSION_OP = [':='] if sys.version_info >= (3, 8) else [] WS_NEEDED_OPERATORS = frozenset([ - '**=', '*=', '/=', '//=', '+=', '-=', '!=', '<>', '<', '>', + '**=', '*=', '/=', '//=', '+=', '-=', '!=', '<', '>', '%=', '^=', '&=', '|=', '==', '<=', '>=', '<<=', '>>=', '=', - 'and', 'in', 'is', 'or', '->'] + - ASSIGNMENT_EXPRESSION_OP) + 'and', 'in', 'is', 'or', '->', ':=']) WHITESPACE = frozenset(' \t\xa0') NEWLINE = frozenset([tokenize.NL, tokenize.NEWLINE]) SKIP_TOKENS = NEWLINE.union([tokenize.INDENT, tokenize.DEDENT]) @@ -124,25 +111,25 @@ BENCHMARK_KEYS = ['directories', 'files', 'logical lines', 'physical lines'] INDENT_REGEX = re.compile(r'([ \t]*)') -RAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,') -RERAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,.*,\s*\w+\s*$') ERRORCODE_REGEX = re.compile(r'\b[A-Z]\d{3}\b') DOCSTRING_REGEX = re.compile(r'u?r?["\']') EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[\[({][ \t]|[ \t][\]}),;:](?!=)') +WHITESPACE_AFTER_DECORATOR_REGEX = re.compile(r'@\s') WHITESPACE_AFTER_COMMA_REGEX = 
re.compile(r'[,;:]\s*(?: |\t)') COMPARE_SINGLETON_REGEX = re.compile(r'(\bNone|\bFalse|\bTrue)?\s*([=!]=)' r'\s*(?(1)|(None|False|True))\b') COMPARE_NEGATIVE_REGEX = re.compile(r'\b(?%&^]+|:=)(\s*)') LAMBDA_REGEX = re.compile(r'\blambda\b') HUNK_REGEX = re.compile(r'^@@ -\d+(?:,\d+)? \+(\d+)(?:,(\d+))? @@.*$') STARTSWITH_DEF_REGEX = re.compile(r'^(async\s+def|def)\b') +STARTSWITH_GENERIC_REGEX = re.compile(r'^(async\s+def|def|class|type)\s+\w+\[') STARTSWITH_TOP_LEVEL_REGEX = re.compile(r'^(async\s+def\s+|def\s+|class\s+|@)') STARTSWITH_INDENT_STATEMENT_REGEX = re.compile( r'^\s*({})\b'.format('|'.join(s.replace(' ', r'\s+') for s in ( @@ -158,6 +145,20 @@ DUNDER_REGEX = re.compile(r"^__([^\s]+)__(?::\s*[a-zA-Z.0-9_\[\]\"]+)? = ") BLANK_EXCEPT_REGEX = re.compile(r"except\s*:") +if sys.version_info >= (3, 12): # pragma: >=3.12 cover + FSTRING_START = tokenize.FSTRING_START + FSTRING_MIDDLE = tokenize.FSTRING_MIDDLE + FSTRING_END = tokenize.FSTRING_END +else: # pragma: <3.12 cover + FSTRING_START = FSTRING_MIDDLE = FSTRING_END = -1 + +if sys.version_info >= (3, 14): # pragma: >=3.14 cover + TSTRING_START = tokenize.TSTRING_START + TSTRING_MIDDLE = tokenize.TSTRING_MIDDLE + TSTRING_END = tokenize.TSTRING_END +else: # pragma: <3.14 cover + TSTRING_START = TSTRING_MIDDLE = TSTRING_END = -1 + _checks = {'physical_line': {}, 'logical_line': {}, 'tree': {}} @@ -204,7 +205,6 @@ def tabs_or_spaces(physical_line, indent_char): These options are highly recommended! 
Okay: if a == 0:\n a = 1\n b = 1 - E101: if a == 0:\n a = 1\n\tb = 1 """ indent = INDENT_REGEX.match(physical_line).group(1) for offset, char in enumerate(indent): @@ -235,9 +235,11 @@ def trailing_whitespace(physical_line): W291: spam(1) \n# W293: class Foo(object):\n \n bang = 12 """ - physical_line = physical_line.rstrip('\n') # chr(10), newline - physical_line = physical_line.rstrip('\r') # chr(13), carriage return - physical_line = physical_line.rstrip('\x0c') # chr(12), form feed, ^L + # Strip these trailing characters: + # - chr(10), newline + # - chr(13), carriage return + # - chr(12), form feed, ^L + physical_line = physical_line.rstrip('\n\r\x0c') stripped = physical_line.rstrip(' \t\v') if physical_line != stripped: if stripped: @@ -441,6 +443,9 @@ def extraneous_whitespace(logical_line): E203: if x == 4: print x, y; x, y = y , x E203: if x == 4: print x, y ; x, y = y, x E203: if x == 4 : print x, y; x, y = y, x + + Okay: @decorator + E204: @ decorator """ line = logical_line for match in EXTRANEOUS_WHITESPACE_REGEX.finditer(line): @@ -454,6 +459,9 @@ def extraneous_whitespace(logical_line): code = ('E202' if char in '}])' else 'E203') # if char in ',;:' yield found, f"{code} whitespace before '{char}'" + if WHITESPACE_AFTER_DECORATOR_REGEX.match(logical_line): + yield 1, "E204 whitespace after decorator '@'" + @register_check def whitespace_around_keywords(logical_line): @@ -488,47 +496,18 @@ def missing_whitespace_after_keyword(logical_line, tokens): E275: from importable.module import(bar, baz) E275: if(foo): bar """ - for tok0, tok1 in zip(tokens, tokens[1:]): + for tok0, tok1 in pairwise(tokens): # This must exclude the True/False/None singletons, which can # appear e.g. as "if x is None:", and async/await, which were # valid identifier names in old Python versions. 
if (tok0.end == tok1.start and + tok0.type == tokenize.NAME and keyword.iskeyword(tok0.string) and tok0.string not in SINGLETONS and - tok0.string not in ('async', 'await') and - tok1.string not in ':\n'): - line, pos = tok0.end - yield pos, "E275 missing whitespace after keyword" - - -@register_check -def missing_whitespace(logical_line): - r"""Each comma, semicolon or colon should be followed by whitespace. - - Okay: [a, b] - Okay: (3,) - Okay: a[1:4] - Okay: a[:4] - Okay: a[1:] - Okay: a[1:4:2] - E231: ['a','b'] - E231: foo(bar,baz) - E231: [{'a':'b'}] - """ - line = logical_line - for index in range(len(line) - 1): - char = line[index] - next_char = line[index + 1] - if char in ',;:' and next_char not in WHITESPACE: - before = line[:index] - if char == ':' and before.count('[') > before.count(']') and \ - before.rfind('{') < before.rfind('['): - continue # Slice syntax, no space required - if char == ',' and next_char == ')': - continue # Allow tuple with only one element: (3,) - if char == ':' and next_char == '=' and sys.version_info >= (3, 8): - continue # Allow assignment expression - yield index, "E231 missing whitespace after '%s'" % char + not (tok0.string == 'except' and tok1.string == '*') and + not (tok0.string == 'yield' and tok1.string == ')') and + (tok1.string and tok1.string != ':' and tok1.string != '\n')): + yield tok0.end, "E275 missing whitespace after keyword" @register_check @@ -719,8 +698,12 @@ def continued_indentation(logical_line, tokens, indent_level, hang_closing, if verbose >= 4: print(f"bracket depth {depth} indent to {start[1]}") # deal with implicit string concatenation - elif (token_type in (tokenize.STRING, tokenize.COMMENT) or - text in ('u', 'ur', 'b', 'br')): + elif token_type in { + tokenize.STRING, + tokenize.COMMENT, + FSTRING_START, + TSTRING_START + }: indent_chances[start[1]] = str # visual indent after assert/raise/with elif not row and not depth and text in ["assert", "raise", "with"]: @@ -808,9 +791,9 @@ def 
whitespace_before_parameters(logical_line, tokens): (index < 2 or tokens[index - 2][1] != 'class') and # Allow "return (a.foo for a in range(5))" not keyword.iskeyword(prev_text) and - # 'match' and 'case' are only soft keywords ( - sys.version_info < (3, 9) or + # 3.12+: type is a soft keyword but no braces after + prev_text == 'type' or not keyword.issoftkeyword(prev_text) ) ): @@ -845,14 +828,16 @@ def whitespace_around_operator(logical_line): @register_check -def missing_whitespace_around_operator(logical_line, tokens): - r"""Surround operators with a single space on either side. +def missing_whitespace(logical_line, tokens): + r"""Surround operators with the correct amount of whitespace. - Always surround these binary operators with a single space on either side: assignment (=), augmented assignment (+=, -= etc.), comparisons (==, <, >, !=, <=, >=, in, not in, is, is not), Booleans (and, or, not). + - Each comma, semicolon or colon should be followed by whitespace. + - If operators with different priorities are used, consider adding whitespace around the operators with the lowest priorities. 
@@ -863,6 +848,13 @@ def missing_whitespace_around_operator(logical_line, tokens): Okay: c = (a + b) * (a - b) Okay: foo(bar, key='word', *args, **kwargs) Okay: alpha[:-i] + Okay: [a, b] + Okay: (3,) + Okay: a[3,] = 1 + Okay: a[1:4] + Okay: a[:4] + Okay: a[1:] + Okay: a[1:4:2] E225: i=i+1 E225: submitted +=1 @@ -873,19 +865,59 @@ def missing_whitespace_around_operator(logical_line, tokens): E226: hypot2 = x*x + y*y E227: c = a|b E228: msg = fmt%(errno, errmsg) + E231: ['a','b'] + E231: foo(bar,baz) + E231: [{'a':'b'}] """ - parens = 0 need_space = False prev_type = tokenize.OP prev_text = prev_end = None operator_types = (tokenize.OP, tokenize.NAME) + brace_stack = [] for token_type, text, start, end, line in tokens: + if token_type == tokenize.OP and text in {'[', '(', '{'}: + brace_stack.append(text) + elif token_type == FSTRING_START: # pragma: >=3.12 cover + brace_stack.append('f') + elif token_type == TSTRING_START: # pragma: >=3.14 cover + brace_stack.append('t') + elif token_type == tokenize.NAME and text == 'lambda': + brace_stack.append('l') + elif brace_stack: + if token_type == tokenize.OP and text in {']', ')', '}'}: + brace_stack.pop() + elif token_type == FSTRING_END: # pragma: >=3.12 cover + brace_stack.pop() + elif token_type == TSTRING_END: # pragma: >=3.14 cover + brace_stack.pop() + elif ( + brace_stack[-1] == 'l' and + token_type == tokenize.OP and + text == ':' + ): + brace_stack.pop() + if token_type in SKIP_COMMENTS: continue - if text in ('(', 'lambda'): - parens += 1 - elif text == ')': - parens -= 1 + + if token_type == tokenize.OP and text in {',', ';', ':'}: + next_char = line[end[1]:end[1] + 1] + if next_char not in WHITESPACE and next_char not in '\r\n': + # slice + if text == ':' and brace_stack[-1:] == ['[']: + pass + # 3.12+ fstring format specifier + elif text == ':' and brace_stack[-2:] == ['f', '{']: # pragma: >=3.12 cover # noqa: E501 + pass + # 3.14+ tstring format specifier + elif text == ':' and brace_stack[-2:] == ['t', 
'{']: # pragma: >=3.14 cover # noqa: E501 + pass + # tuple (and list for some reason?) + elif text == ',' and next_char in ')]': + pass + else: + yield start, f'E231 missing whitespace after {text!r}' + if need_space: if start != prev_end: # Found a (probably) needed space @@ -893,10 +925,6 @@ def missing_whitespace_around_operator(logical_line, tokens): yield (need_space[0], "E225 missing whitespace around operator") need_space = False - elif text == '>' and prev_text in ('<', '-'): - # Tolerate the "<>" operator, even if running Python 3 - # Deal with Python 3's annotated return value "->" - pass elif ( # def f(a, /, b): # ^ @@ -926,8 +954,18 @@ def missing_whitespace_around_operator(logical_line, tokens): "around %s operator" % (code, optype)) need_space = False elif token_type in operator_types and prev_end is not None: - if text == '=' and parens: - # Allow keyword args or defaults: foo(bar=None). + if ( + text == '=' and ( + # allow lambda default args: lambda x=None: None + brace_stack[-1:] == ['l'] or + # allow keyword args or defaults: foo(bar=None). + brace_stack[-1:] == ['('] or + # allow python 3.8 fstring repr specifier + brace_stack[-2:] == ['f', '{'] or + # allow python 3.8 fstring repr specifier + brace_stack[-2:] == ['t', '{'] + ) + ): pass elif text in WS_NEEDED_OPERATORS: need_space = True @@ -937,10 +975,8 @@ def missing_whitespace_around_operator(logical_line, tokens): # Allow argument unpacking: foo(*args, **kwargs). 
if prev_type == tokenize.OP and prev_text in '}])' or ( prev_type != tokenize.OP and - prev_text not in KEYWORDS and ( - sys.version_info < (3, 9) or - not keyword.issoftkeyword(prev_text) - ) + prev_text not in KEYWORDS and + not keyword.issoftkeyword(prev_text) ): need_space = None elif text in WS_OPTIONAL_OPERATORS: @@ -999,12 +1035,13 @@ def whitespace_around_named_parameter_equals(logical_line, tokens): E251: return magic(r = real, i = imag) E252: def complex(real, image: float=0.0): """ - parens = 0 + paren_stack = [] no_space = False require_space = False prev_end = None annotated_func_arg = False in_def = bool(STARTSWITH_DEF_REGEX.match(logical_line)) + in_generic = bool(STARTSWITH_GENERIC_REGEX.match(logical_line)) message = "E251 unexpected spaces around keyword / parameter equals" missing_message = "E252 missing whitespace around parameter equals" @@ -1022,15 +1059,23 @@ def whitespace_around_named_parameter_equals(logical_line, tokens): yield (prev_end, missing_message) if token_type == tokenize.OP: if text in '([': - parens += 1 - elif text in ')]': - parens -= 1 - elif in_def and text == ':' and parens == 1: + paren_stack.append(text) + elif text in ')]' and paren_stack: + paren_stack.pop() + # def f(arg: tp = default): ... + elif text == ':' and in_def and paren_stack == ['(']: annotated_func_arg = True - elif parens == 1 and text == ',': + elif len(paren_stack) == 1 and text == ',': annotated_func_arg = False - elif parens and text == '=': - if annotated_func_arg and parens == 1: + elif paren_stack and text == '=': + if ( + # PEP 696 defaults always use spaced-style `=` + # type A[T = default] = ... + # def f[T = default](): ... + # class C[T = default](): ... 
+ (in_generic and paren_stack == ['[']) or + (annotated_func_arg and paren_stack == ['(']) + ): require_space = True if start == prev_end: yield (prev_end, missing_message) @@ -1038,7 +1083,7 @@ def whitespace_around_named_parameter_equals(logical_line, tokens): no_space = True if start != prev_end: yield (prev_end, message) - if not parens: + if not paren_stack: annotated_func_arg = False prev_end = end @@ -1109,6 +1154,22 @@ def imports_on_separate_lines(logical_line): yield found, "E401 multiple imports on one line" +_STRING_PREFIXES = frozenset(('u', 'U', 'b', 'B', 'r', 'R')) + + +def _is_string_literal(line): + if line: + first_char = line[0] + if first_char in _STRING_PREFIXES: + first_char = line[1] + return first_char == '"' or first_char == "'" + return False + + +_ALLOWED_KEYWORDS_IN_IMPORTS = ( + 'try', 'except', 'else', 'finally', 'with', 'if', 'elif') + + @register_check def module_imports_on_top_of_file( logical_line, indent_level, checker_state, noqa): @@ -1121,25 +1182,12 @@ def module_imports_on_top_of_file( Okay: # this is a comment\nimport os Okay: '''this is a module docstring'''\nimport os Okay: r'''this is a module docstring'''\nimport os - Okay: - try:\n\timport x\nexcept ImportError:\n\tpass\nelse:\n\tpass\nimport y - Okay: - try:\n\timport x\nexcept ImportError:\n\tpass\nfinally:\n\tpass\nimport y E402: a=1\nimport os E402: 'One string'\n"Two string"\nimport os E402: a=1\nfrom sys import x Okay: if x:\n import os """ # noqa - def is_string_literal(line): - if line[0] in 'uUbB': - line = line[1:] - if line and line[0] in 'rR': - line = line[1:] - return line and (line[0] == '"' or line[0] == "'") - - allowed_keywords = ( - 'try', 'except', 'else', 'finally', 'with', 'if', 'elif') if indent_level: # Allow imports in conditional statement/function return @@ -1147,25 +1195,25 @@ def is_string_literal(line): return if noqa: return - line = logical_line - if line.startswith('import ') or line.startswith('from '): + if 
logical_line.startswith(('import ', 'from ')): if checker_state.get('seen_non_imports', False): yield 0, "E402 module level import not at top of file" - elif re.match(DUNDER_REGEX, line): - return - elif any(line.startswith(kw) for kw in allowed_keywords): - # Allow certain keywords intermixed with imports in order to - # support conditional or filtered importing - return - elif is_string_literal(line): - # The first literal is a docstring, allow it. Otherwise, report - # error. - if checker_state.get('seen_docstring', False): - checker_state['seen_non_imports'] = True + elif not checker_state.get('seen_non_imports', False): + if DUNDER_REGEX.match(logical_line): + return + elif logical_line.startswith(_ALLOWED_KEYWORDS_IN_IMPORTS): + # Allow certain keywords intermixed with imports in order to + # support conditional or filtered importing + return + elif _is_string_literal(logical_line): + # The first literal is a docstring, allow it. Otherwise, + # report error. + if checker_state.get('seen_docstring', False): + checker_state['seen_non_imports'] = True + else: + checker_state['seen_docstring'] = True else: - checker_state['seen_docstring'] = True - else: - checker_state['seen_non_imports'] = True + checker_state['seen_non_imports'] = True @register_check @@ -1205,11 +1253,12 @@ def compound_statements(logical_line): counts = {char: 0 for char in '{}[]()'} while -1 < found < last_char: update_counts(line[prev_found:found], counts) - if ((counts['{'] <= counts['}'] and # {'a': 1} (dict) - counts['['] <= counts[']'] and # [1:2] (slice) - counts['('] <= counts[')']) and # (annotation) - not (sys.version_info >= (3, 8) and - line[found + 1] == '=')): # assignment expression + if ( + counts['{'] <= counts['}'] and # {'a': 1} (dict) + counts['['] <= counts[']'] and # [1:2] (slice) + counts['('] <= counts[')'] and # (annotation) + line[found + 1] != '=' # assignment expression + ): lambda_kw = LAMBDA_REGEX.search(line, 0, found) if lambda_kw: before = 
line[:lambda_kw.start()].rstrip() @@ -1258,6 +1307,8 @@ def explicit_line_join(logical_line, tokens): comment = True if start[0] != prev_start and parens and backslash and not comment: yield backslash, "E502 the backslash is redundant between brackets" + if start[0] != prev_start: + comment = False # Reset comment flag on newline if end[0] != prev_end: if line.rstrip('\r\n').endswith('\\'): backslash = (end[0], len(line.splitlines()[-1]) - 1) @@ -1440,19 +1491,24 @@ def comparison_negative(logical_line): @register_check def comparison_type(logical_line, noqa): - r"""Object type comparisons should always use isinstance(). + r"""Object type comparisons should `is` / `is not` / `isinstance()`. Do not compare types directly. Okay: if isinstance(obj, int): - E721: if type(obj) is type(1): + Okay: if type(obj) is int: + E721: if type(obj) == type(1): """ match = COMPARE_TYPE_REGEX.search(logical_line) if match and not noqa: inst = match.group(1) if inst and inst.isidentifier() and inst not in SINGLETONS: return # Allow comparison for types which are not obvious - yield match.start(), "E721 do not compare types, use 'isinstance()'" + yield ( + match.start(), + "E721 do not compare types, for exact checks use `is` / `is not`, " + "for instance checks use `isinstance()`", + ) @register_check @@ -1487,14 +1543,19 @@ def ambiguous_identifier(logical_line, tokens): E741: I = 42 Variables can be bound in several other contexts, including class - and function definitions, 'global' and 'nonlocal' statements, - exception handlers, and 'with' and 'for' statements. + and function definitions, lambda functions, 'global' and 'nonlocal' + statements, exception handlers, and 'with' and 'for' statements. In addition, we have a special handling for function parameters. 
Okay: except AttributeError as o: Okay: with lock as L: Okay: foo(l=12) + Okay: foo(l=I) Okay: for a in foo(l=12): + Okay: lambda arg: arg * l + Okay: lambda a=l[I:5]: None + Okay: lambda x=a.I: None + Okay: if l >= 12: E741: except AttributeError as O: E741: with lock as l: E741: global I @@ -1503,32 +1564,39 @@ def ambiguous_identifier(logical_line, tokens): E741: def foo(l=12): E741: l = foo(l=12) E741: for l in range(10): + E741: [l for l in lines if l] + E741: lambda l: None + E741: lambda a=x[1:5], l: None + E741: lambda **l: + E741: def f(**l): E742: class I(object): E743: def l(x): """ - is_func_def = False # Set to true if 'def' is found - parameter_parentheses_level = 0 + func_depth = None # set to brace depth if 'def' or 'lambda' is found + seen_colon = False # set to true if we're done with function parameters + brace_depth = 0 idents_to_avoid = ('l', 'O', 'I') prev_type, prev_text, prev_start, prev_end, __ = tokens[0] - for token_type, text, start, end, line in tokens[1:]: + for index in range(1, len(tokens)): + token_type, text, start, end, line = tokens[index] ident = pos = None # find function definitions - if prev_text == 'def': - is_func_def = True + if prev_text in {'def', 'lambda'}: + func_depth = brace_depth + seen_colon = False + elif ( + func_depth is not None and + text == ':' and + brace_depth == func_depth + ): + seen_colon = True # update parameter parentheses level - if parameter_parentheses_level == 0 and \ - prev_type == tokenize.NAME and \ - token_type == tokenize.OP and text == '(': - parameter_parentheses_level = 1 - elif parameter_parentheses_level > 0 and \ - token_type == tokenize.OP: - if text == '(': - parameter_parentheses_level += 1 - elif text == ')': - parameter_parentheses_level -= 1 + if text in '([{': + brace_depth += 1 + elif text in ')]}': + brace_depth -= 1 # identifiers on the lhs of an assignment operator - if token_type == tokenize.OP and '=' in text and \ - parameter_parentheses_level == 0: + if text == ':=' or 
(text == '=' and brace_depth == 0): if prev_text in idents_to_avoid: ident = prev_text pos = prev_start @@ -1538,11 +1606,16 @@ def ambiguous_identifier(logical_line, tokens): if text in idents_to_avoid: ident = text pos = start - # function parameter definitions - if is_func_def: - if text in idents_to_avoid: - ident = text - pos = start + # function / lambda parameter definitions + if ( + func_depth is not None and + not seen_colon and + index < len(tokens) - 1 and tokens[index + 1][1] in ':,=)' and + prev_text in {'lambda', ',', '*', '**', '('} and + text in idents_to_avoid + ): + ident = text + pos = start if prev_text == 'class': if text in idents_to_avoid: yield start, "E742 ambiguous class definition '%s'" % text @@ -1551,62 +1624,31 @@ def ambiguous_identifier(logical_line, tokens): yield start, "E743 ambiguous function definition '%s'" % text if ident: yield pos, "E741 ambiguous variable name '%s'" % ident - prev_type = token_type prev_text = text prev_start = start -@register_check -def python_3000_has_key(logical_line, noqa): - r"""The {}.has_key() method is removed in Python 3: use the 'in' - operator. - - Okay: if "alph" in d:\n print d["alph"] - W601: assert d.has_key('alph') - """ - pos = logical_line.find('.has_key(') - if pos > -1 and not noqa: - yield pos, "W601 .has_key() is deprecated, use 'in'" - - -@register_check -def python_3000_raise_comma(logical_line): - r"""When raising an exception, use "raise ValueError('message')". - - The older form is removed in Python 3. - - Okay: raise DummyError("Message") - W602: raise DummyError, "Message" - """ - match = RAISE_COMMA_REGEX.match(logical_line) - if match and not RERAISE_COMMA_REGEX.match(logical_line): - yield match.end() - 1, "W602 deprecated form of raising exception" - - -@register_check -def python_3000_not_equal(logical_line): - r"""New code should always use != instead of <>. - - The older syntax is removed in Python 3. 
- - Okay: if a != 'no': - W603: if a <> 'no': - """ - pos = logical_line.find('<>') - if pos > -1: - yield pos, "W603 '<>' is deprecated, use '!='" - - -@register_check -def python_3000_backticks(logical_line): - r"""Use repr() instead of backticks in Python 3. - - Okay: val = repr(1 + 2) - W604: val = `1 + 2` - """ - pos = logical_line.find('`') - if pos > -1: - yield pos, "W604 backticks are deprecated, use 'repr()'" +# https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals +_PYTHON_3000_VALID_ESC = frozenset([ + '\n', + '\\', + '\'', + '"', + 'a', + 'b', + 'f', + 'n', + 'r', + 't', + 'v', + '0', '1', '2', '3', '4', '5', '6', '7', + 'x', + + # Escape sequences only recognized in string literals + 'N', + 'u', + 'U', +]) @register_check @@ -1619,124 +1661,44 @@ def python_3000_invalid_escape_sequence(logical_line, tokens, noqa): if noqa: return - # https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals - valid = [ - '\n', - '\\', - '\'', - '"', - 'a', - 'b', - 'f', - 'n', - 'r', - 't', - 'v', - '0', '1', '2', '3', '4', '5', '6', '7', - 'x', - - # Escape sequences only recognized in string literals - 'N', - 'u', - 'U', - ] - - for token_type, text, start, end, line in tokens: - if token_type == tokenize.STRING: - start_line, start_col = start - quote = text[-3:] if text[-3:] in ('"""', "'''") else text[-1] + prefixes = [] + for token_type, text, start, _, _ in tokens: + if ( + token_type == tokenize.STRING or + token_type == FSTRING_START or + token_type == TSTRING_START + ): # Extract string modifiers (e.g. 
u or r) - quote_pos = text.index(quote) - prefix = text[:quote_pos].lower() - start = quote_pos + len(quote) - string = text[start:-len(quote)] + prefixes.append(text[:text.index(text[-1])].lower()) - if 'r' not in prefix: - pos = string.find('\\') + if ( + token_type == tokenize.STRING or + token_type == FSTRING_MIDDLE or + token_type == TSTRING_MIDDLE + ): + if 'r' not in prefixes[-1]: + start_line, start_col = start + pos = text.find('\\') while pos >= 0: pos += 1 - if string[pos] not in valid: - line = start_line + string.count('\n', 0, pos) + if text[pos] not in _PYTHON_3000_VALID_ESC: + line = start_line + text.count('\n', 0, pos) if line == start_line: - col = start_col + len(prefix) + len(quote) + pos + col = start_col + pos else: - col = pos - string.rfind('\n', 0, pos) - 1 + col = pos - text.rfind('\n', 0, pos) - 1 yield ( (line, col - 1), - "W605 invalid escape sequence '\\%s'" % - string[pos], + f"W605 invalid escape sequence '\\{text[pos]}'" ) - pos = string.find('\\', pos + 1) - - -@register_check -def python_3000_async_await_keywords(logical_line, tokens): - """'async' and 'await' are reserved keywords starting at Python 3.7. - - W606: async = 42 - W606: await = 42 - Okay: async def read(db):\n data = await db.fetch('SELECT ...') - """ - # The Python tokenize library before Python 3.5 recognizes - # async/await as a NAME token. 
Therefore, use a state machine to - # look for the possible async/await constructs as defined by the - # Python grammar: - # https://docs.python.org/3/reference/grammar.html + pos = text.find('\\', pos + 1) - state = None - for token_type, text, start, end, line in tokens: - error = False - - if token_type == tokenize.NL: - continue - - if state is None: - if token_type == tokenize.NAME: - if text == 'async': - state = ('async_stmt', start) - elif text == 'await': - state = ('await', start) - elif (token_type == tokenize.NAME and - text in ('def', 'for')): - state = ('define', start) - - elif state[0] == 'async_stmt': - if token_type == tokenize.NAME and text in ('def', 'with', 'for'): - # One of funcdef, with_stmt, or for_stmt. Return to - # looking for async/await names. - state = None - else: - error = True - elif state[0] == 'await': - if token_type == tokenize.NAME: - # An await expression. Return to looking for async/await - # names. - state = None - elif token_type == tokenize.OP and text == '(': - state = None - else: - error = True - elif state[0] == 'define': - if token_type == tokenize.NAME and text in ('async', 'await'): - error = True - else: - state = None - - if error: - yield ( - state[1], - "W606 'async' and 'await' are reserved keywords starting with " - "Python 3.7", - ) - state = None - - # Last token - if state is not None: - yield ( - state[1], - "W606 'async' and 'await' are reserved keywords starting with " - "Python 3.7", - ) + if ( + token_type == tokenize.STRING or + token_type == FSTRING_END or + token_type == TSTRING_END + ): + prefixes.pop() ######################################################################## @@ -1806,7 +1768,7 @@ def readlines(filename): def stdin_get_value(): """Read the value from stdin.""" - return TextIOWrapper(sys.stdin.buffer, errors='ignore').read() + return io.TextIOWrapper(sys.stdin.buffer, errors='ignore').read() noqa = lru_cache(512)(re.compile(r'# no(?:qa|pep8)\b', re.I).search) @@ -1816,15 +1778,6 @@ 
def expand_indent(line): r"""Return the amount of indentation. Tabs are expanded to the next multiple of 8. - - >>> expand_indent(' ') - 4 - >>> expand_indent('\t') - 8 - >>> expand_indent(' \t') - 8 - >>> expand_indent(' \t') - 16 """ line = line.rstrip('\n\r') if '\t' not in line: @@ -1841,15 +1794,7 @@ def expand_indent(line): def mute_string(text): - """Replace contents with 'xxx' to prevent syntax matching. - - >>> mute_string('"abc"') - '"xxx"' - >>> mute_string("'''abc'''") - "'''xxx'''" - >>> mute_string("r'abc'") - "r'xxx'" - """ + """Replace contents with 'xxx' to prevent syntax matching.""" # String modifiers (e.g. u or r) start = text.index(text[-1]) + 1 end = len(text) - 1 @@ -1950,6 +1895,7 @@ def __init__(self, filename=None, lines=None, self.max_line_length = options.max_line_length self.max_doc_length = options.max_doc_length self.indent_size = options.indent_size + self.fstring_start = self.tstring_start = 0 self.multiline = False # in a multiline string? self.hang_closing = options.hang_closing self.indent_size = options.indent_size @@ -2008,9 +1954,7 @@ def readline(self): def run_check(self, check, argument_names): """Run a check plugin.""" - arguments = [] - for name in argument_names: - arguments.append(getattr(self, name)) + arguments = [getattr(self, name) for name in argument_names] return check(*arguments) def init_checker_state(self, name, argument_names): @@ -2046,6 +1990,11 @@ def build_tokens_line(self): continue if token_type == tokenize.STRING: text = mute_string(text) + elif token_type in {FSTRING_MIDDLE, TSTRING_MIDDLE}: # pragma: >=3.12 cover # noqa: E501 + # fstring tokens are "unescaped" braces -- re-escape! 
+ brace_count = text.count('{') + text.count('}') + text = 'x' * (len(text) + brace_count) + end = (end[0], end[1] + brace_count) if prev_row: (start_row, start_col) = start if prev_row != start_row: # different row @@ -2131,16 +2080,24 @@ def maybe_check_physical(self, token, prev_physical): """If appropriate for token, check current physical line(s).""" # Called after every token, but act only on end of line. + if token.type == FSTRING_START: # pragma: >=3.12 cover + self.fstring_start = token.start[0] + elif token.type == TSTRING_START: # pragma: >=3.14 cover + self.tstring_start = token.start[0] # a newline token ends a single physical line. - if _is_eol_token(token): + elif _is_eol_token(token): # if the file does not end with a newline, the NEWLINE # token is inserted by the parser, but it does not contain # the previous physical line in `token[4]` - if token[4] == '': + if token.line == '': self.check_physical(prev_physical) else: - self.check_physical(token[4]) - elif token[0] == tokenize.STRING and '\n' in token[1]: + self.check_physical(token.line) + elif ( + token.type == tokenize.STRING and '\n' in token.string or + token.type == FSTRING_END or + token.type == TSTRING_END + ): # Less obviously, a string that contains newlines is a # multiline string, either triple-quoted or with internal # newlines backslash-escaped. 
Check every physical line in
@@ -2156,14 +2113,20 @@ def maybe_check_physical(self, token, prev_physical):
             # - have to wind self.line_number back because initially it
             #   points to the last line of the string, and we want
             #   check_physical() to give accurate feedback
-            if noqa(token[4]):
+            if noqa(token.line):
                 return
+            if token.type == FSTRING_END:  # pragma: >=3.12 cover
+                start = self.fstring_start
+            elif token.type == TSTRING_END:  # pragma: >=3.14 cover
+                start = self.tstring_start
+            else:
+                start = token.start[0]
+            end = token.end[0]
+
             self.multiline = True
-            self.line_number = token[2][0]
-            _, src, (_, offset), _, _ = token
-            src = self.lines[self.line_number - 1][:offset] + src
-            for line in src.split('\n')[:-1]:
-                self.check_physical(line + '\n')
+            self.line_number = start
+            for line_number in range(start, end):
+                self.check_physical(self.lines[line_number - 1] + '\n')
             self.line_number += 1
             self.multiline = False
 
@@ -2403,8 +2366,7 @@ def __init__(self, *args, **kwargs):
         options.reporter = BaseReport if options.quiet else StandardReport
 
         options.select = tuple(options.select or ())
-        if not (options.select or options.ignore or
-                options.testsuite or options.doctest) and DEFAULT_IGNORE:
+        if not (options.select or options.ignore) and DEFAULT_IGNORE:
             # The default choice: ignore controversial checks
             options.ignore = tuple(DEFAULT_IGNORE.split(','))
         else:
@@ -2574,11 +2536,6 @@ def get_parser(prog='pycodestyle', version=__version__):
                      help="report changes only within line number ranges in "
                           "the unified diff received on STDIN")
     group = parser.add_option_group("Testing Options")
-    if os.path.exists(TESTSUITE_PATH):
-        group.add_option('--testsuite', metavar='dir',
-                         help="run regression tests from dir")
-        group.add_option('--doctest', action='store_true',
-                         help="run doctest on myself")
     group.add_option('--benchmark', action='store_true',
                      help="measure processing speed")
     return parser
@@ -2595,7 +2552,7 @@ def read_config(options, args, arglist, parser):
     merged 
together (in that order) using the read method of ConfigParser. """ - config = RawConfigParser() + config = configparser.RawConfigParser() cli_conf = options.config @@ -2655,7 +2612,6 @@ def read_config(options, args, arglist, parser): # Third, overwrite with the command-line options (options, __) = parser.parse_args(arglist, values=new_options) - options.doctest = options.testsuite = False return options @@ -2688,17 +2644,14 @@ def process_options(arglist=None, parse_argv=False, config_file=None, if verbose is not None: options.verbose = verbose - if options.ensure_value('testsuite', False): - args.append(options.testsuite) - elif not options.ensure_value('doctest', False): - if parse_argv and not args: - if options.diff or any(os.path.exists(name) - for name in PROJECT_CONFIG): - args = ['.'] - else: - parser.error('input not specified') - options = read_config(options, args, arglist, parser) - options.reporter = parse_argv and options.quiet == 1 and FileReport + if parse_argv and not args: + if options.diff or any(os.path.exists(name) + for name in PROJECT_CONFIG): + args = ['.'] + else: + parser.error('input not specified') + options = read_config(options, args, arglist, parser) + options.reporter = parse_argv and options.quiet == 1 and FileReport options.filename = _parse_multi_options(options.filename) options.exclude = normalize_paths(options.exclude) @@ -2743,11 +2696,7 @@ def _main(): style_guide = StyleGuide(parse_argv=True) options = style_guide.options - if options.doctest or options.testsuite: - from testsuite.support import run_tests - report = run_tests(style_guide) - else: - report = style_guide.check_files() + report = style_guide.check_files() if options.statistics: report.print_statistics() @@ -2755,9 +2704,6 @@ def _main(): if options.benchmark: report.print_benchmark() - if options.testsuite and not options.quiet: - report.print_results() - if report.total_errors: if options.count: sys.stderr.write(str(report.total_errors) + '\n') diff --git 
a/setup.cfg b/setup.cfg index 73ae4e796..e2bd7c0f3 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,11 +1,52 @@ +[metadata] +name = pycodestyle +version = attr: pycodestyle.__version__ +description = Python style guide checker +long_description = file: README.rst +long_description_content_type = text/x-rst +url = https://pycodestyle.pycqa.org/ +author = Johann C. Rocholl +author_email = johann@rocholl.net +maintainer = Ian Lee +maintainer_email = IanLee1521@gmail.com +license = MIT +license_files = LICENSE +classifiers = + Development Status :: 5 - Production/Stable + Environment :: Console + Intended Audience :: Developers + Operating System :: OS Independent + Programming Language :: Python + Programming Language :: Python :: 3 + Programming Language :: Python :: 3 :: Only + Programming Language :: Python :: Implementation :: CPython + Programming Language :: Python :: Implementation :: PyPy + Topic :: Software Development :: Libraries :: Python Modules +keywords = pycodestyle, pep8, PEP 8, PEP-8, PEP8 +project_urls = + Changes=https://pycodestyle.pycqa.org/en/latest/developer.html#changes + +[options] +py_modules = pycodestyle +python_requires = >=3.10 +include_package_data = True +zip_safe = False + +[options.entry_points] +console_scripts = + pycodestyle = pycodestyle:_main + [bdist_wheel] universal = 1 -[metadata] -license_file = LICENSE - [pycodestyle] -select = ignore = E226,E24,W504 max_line_length = 79 max_doc_length = 72 + +[coverage:run] +plugins = covdefaults +omit = testing/data + +[coverage:report] +fail_under = 93 diff --git a/setup.py b/setup.py index fe96737bf..8bf1ba938 100644 --- a/setup.py +++ b/setup.py @@ -1,59 +1,2 @@ from setuptools import setup - - -def get_version(): - with open('pycodestyle.py') as f: - for line in f: - if line.startswith('__version__'): - return eval(line.split('=')[-1]) - - -def get_long_description(): - descr = [] - for fname in 'README.rst', 'CHANGES.txt': - with open(fname) as f: - descr.append(f.read()) - return 
'\n\n'.join(descr) - - -setup( - name='pycodestyle', - version=get_version(), - description="Python style guide checker", - long_description=get_long_description(), - keywords='pycodestyle, pep8, PEP 8, PEP-8, PEP8', - author='Johann C. Rocholl', - author_email='johann@rocholl.net', - maintainer='Ian Lee', - maintainer_email='IanLee1521@gmail.com', - url='https://pycodestyle.pycqa.org/', - license='Expat license', - py_modules=['pycodestyle'], - include_package_data=True, - zip_safe=False, - python_requires='>=3.6', - entry_points={ - 'console_scripts': [ - 'pycodestyle = pycodestyle:_main', - ], - }, - classifiers=[ - 'Development Status :: 5 - Production/Stable', - 'Environment :: Console', - 'Intended Audience :: Developers', - 'License :: OSI Approved :: MIT License', - 'Operating System :: OS Independent', - 'Programming Language :: Python', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Programming Language :: Python :: Implementation :: CPython', - 'Programming Language :: Python :: Implementation :: PyPy', - 'Topic :: Software Development :: Libraries :: Python Modules', - ], - project_urls={ - 'Changes': - 'https://pycodestyle.pycqa.org/en/latest/developer.html#changes', - }, -) +setup() diff --git a/testsuite/__init__.py b/testing/__init__.py similarity index 100% rename from testsuite/__init__.py rename to testing/__init__.py diff --git a/testsuite/E10.py b/testing/data/E10.py similarity index 83% rename from testsuite/E10.py rename to testing/data/E10.py index 7b4259455..1a9011248 100644 --- a/testsuite/E10.py +++ b/testing/data/E10.py @@ -1,8 +1,3 @@ -#: E101 W191 -for a in 'abc': - for b in 'xyz': - print a # indented with 8 spaces - print b # indented with 1 tab #: E101 E122 W191 W191 if True: pass diff --git a/testsuite/E11.py b/testing/data/E11.py similarity index 100% rename from testsuite/E11.py rename to 
testing/data/E11.py diff --git a/testsuite/E12.py b/testing/data/E12.py similarity index 99% rename from testsuite/E12.py rename to testing/data/E12.py index 968382c36..dabac0d75 100644 --- a/testsuite/E12.py +++ b/testing/data/E12.py @@ -366,7 +366,7 @@ def example_issue254(): # more stuff ) ) -#: E701:1:8 E122:2:1 E203:4:8 E128:5:1 +#: E701:1:8 E231:1:8 E122:2:1 E203:4:8 E128:5:1 if True:\ print(True) diff --git a/testsuite/E12not.py b/testing/data/E12not.py similarity index 99% rename from testsuite/E12not.py rename to testing/data/E12not.py index 767765915..a92739e58 100644 --- a/testsuite/E12not.py +++ b/testing/data/E12not.py @@ -429,15 +429,15 @@ def unicode2html(s): help = u"print total number of errors " \ u"to standard error" -help = ur"print total number of errors " \ - ur"to standard error" - help = b"print total number of errors " \ b"to standard error" help = br"print total number of errors " \ br"to standard error" +help = f"print total number of errors " \ + f"to standard error" + d = dict('foo', help="exclude files or directories which match these " "comma separated patterns (default: %s)" % DEFAULT_EXCLUDE) diff --git a/testsuite/E20.py b/testing/data/E20.py similarity index 85% rename from testsuite/E20.py rename to testing/data/E20.py index 20c6dfd80..ed21b213b 100644 --- a/testsuite/E20.py +++ b/testing/data/E20.py @@ -75,4 +75,21 @@ x, y = y, x a[b1, :] == a[b1, ...] 
b = a[:, b1] +#: E204:1:2 +@ decorator +def f(): + pass +#: E204:1:2 +@ decorator +def f(): + pass +#: E204:1:2 +@ decorator +def f(): + pass +#: E204:2:6 +if True: + @ decorator + def f(): + pass #: diff --git a/testsuite/E21.py b/testing/data/E21.py similarity index 100% rename from testsuite/E21.py rename to testing/data/E21.py diff --git a/testsuite/E22.py b/testing/data/E22.py similarity index 98% rename from testsuite/E22.py rename to testing/data/E22.py index 7ea27927e..ba3f3960c 100644 --- a/testsuite/E22.py +++ b/testing/data/E22.py @@ -98,6 +98,8 @@ c = (a +b)*(a - b) #: E225 E226 c = (a+ b)*(a - b) +#: E225 +x[lambda: None]=1 #: #: E226 diff --git a/testsuite/E23.py b/testing/data/E23.py similarity index 100% rename from testsuite/E23.py rename to testing/data/E23.py diff --git a/testsuite/E24.py b/testing/data/E24.py similarity index 100% rename from testsuite/E24.py rename to testing/data/E24.py diff --git a/testsuite/E25.py b/testing/data/E25.py similarity index 100% rename from testsuite/E25.py rename to testing/data/E25.py diff --git a/testsuite/E26.py b/testing/data/E26.py similarity index 100% rename from testsuite/E26.py rename to testing/data/E26.py diff --git a/testsuite/E27.py b/testing/data/E27.py similarity index 87% rename from testsuite/E27.py rename to testing/data/E27.py index 91aa07904..ca0693069 100644 --- a/testsuite/E27.py +++ b/testing/data/E27.py @@ -49,3 +49,10 @@ pass #: Okay matched = {"true": True, "false": False} +#: E275:2:11 +if True: + assert(1) +#: Okay +def f(): + print((yield)) + x = (yield) diff --git a/testsuite/E30.py b/testing/data/E30.py similarity index 93% rename from testsuite/E30.py rename to testing/data/E30.py index ebe4e9d25..7ef468901 100644 --- a/testsuite/E30.py +++ b/testing/data/E30.py @@ -75,6 +75,20 @@ def a(): #: +#: E303:6:5 +class xyz: + def a(self): + pass + + + def b(self): + pass +#: E303:5:5 +if True: + a = 1 + + + a = 2 #: E304:3:1 @decorator diff --git a/testsuite/E30not.py 
b/testing/data/E30not.py similarity index 100% rename from testsuite/E30not.py rename to testing/data/E30not.py diff --git a/testsuite/E40.py b/testing/data/E40.py similarity index 100% rename from testsuite/E40.py rename to testing/data/E40.py diff --git a/testsuite/E50.py b/testing/data/E50.py similarity index 91% rename from testsuite/E50.py rename to testing/data/E50.py index bcf3bdce8..4870fcc23 100644 --- a/testsuite/E50.py +++ b/testing/data/E50.py @@ -25,6 +25,13 @@ if (foo is None and bar is "e000" and \ blah == 'yeah'): blah = 'yeahnah' +#: E502 W503 W503 +y = ( + 2 + 2 # \ + + 3 # \ + + 4 \ + + 3 +) # #: Okay a = ('AAA' @@ -69,6 +76,11 @@ #: E501 W505 '''same thing, but this time without a terminal newline in the string long long long long long long long long long long long long long long long long line''' +#: E501 +if True: + x = f""" + covdefaults>=1.2; python_version == '2.7' or python_version == '{py_ver}' + """ # # issue 224 (unavoidable long lines in docstrings) #: Okay @@ -87,6 +99,10 @@ def foo(): """Lorem ipsum dolor sit amet, consectetur adipiscing elit. 
Duis pulvinar vitae """ +#: E501 +loooooong = 'looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong' +f"""\ +""" #: Okay """ This diff --git a/testsuite/E70.py b/testing/data/E70.py similarity index 100% rename from testsuite/E70.py rename to testing/data/E70.py diff --git a/testsuite/E71.py b/testing/data/E71.py similarity index 100% rename from testsuite/E71.py rename to testing/data/E71.py diff --git a/testsuite/E72.py b/testing/data/E72.py similarity index 96% rename from testsuite/E72.py rename to testing/data/E72.py index 61e17eb28..5d1046cb0 100644 --- a/testsuite/E72.py +++ b/testing/data/E72.py @@ -5,11 +5,13 @@ if type(res) != type(""): pass #: Okay +res.type("") == "" +#: Okay import types if res == types.IntType: pass -#: E721 +#: Okay import types if type(res) is not types.ListType: @@ -26,9 +28,9 @@ assert type(res) == type((0)) #: E721 assert type(res) != type((1, )) -#: E721 +#: Okay assert type(res) is type((1, )) -#: E721 +#: Okay assert type(res) is not type((1, )) #: E211 E721 assert type(res) == type ([2, ]) diff --git a/testsuite/E73.py b/testing/data/E73.py similarity index 100% rename from testsuite/E73.py rename to testing/data/E73.py diff --git a/testing/data/E74.py b/testing/data/E74.py new file mode 100644 index 000000000..9bb4c5811 --- /dev/null +++ b/testing/data/E74.py @@ -0,0 +1,13 @@ +#: E741:1:8 +lambda l: dict(zip(l, range(len(l)))) +#: E741:1:7 E704:1:1 +def f(l): print(l, l, l) +#: E741:2:12 +x = ( + lambda l: dict(zip(l, range(len(l)))), +) +#: E741:2:12 E741:3:12 +x = ( + lambda l: dict(zip(l, range(len(l)))), + lambda l: dict(zip(l, range(len(l)))), +) diff --git a/testsuite/E90.py b/testing/data/E90.py similarity index 52% rename from testsuite/E90.py rename to testing/data/E90.py index 2c18e9aff..e0a10d045 100644 --- a/testsuite/E90.py +++ b/testing/data/E90.py @@ -1,6 +1,4 @@ #: E901 -} -#: E901 = [x #: E901 E101 W191 while True: @@ -8,14 +6,6 @@ pass except: print 'Whoops' -#: E122 E225 E251 E251 - -# Do 
not crash if code is invalid -if msg: - errmsg = msg % progress.get(cr_dbname)) - -def lasting(self, duration=300): - progress = self._progress.setdefault('foo', {} #: Okay # Issue #119 diff --git a/testsuite/W19.py b/testing/data/W19.py similarity index 100% rename from testsuite/W19.py rename to testing/data/W19.py diff --git a/testsuite/W29.py b/testing/data/W29.py similarity index 88% rename from testsuite/W29.py rename to testing/data/W29.py index e9ad5800d..f84719763 100644 --- a/testsuite/W29.py +++ b/testing/data/W29.py @@ -14,8 +14,6 @@ class Foo(object): #: W191 W292 noeol if False: pass # indented with tabs -#: W292:1:36 noeol -# This line doesn't have a linefeed #: W292:1:5 E225:1:2 noeol 1+ 1 #: W292:1:27 E261:1:12 noeol diff --git a/testsuite/W39.py b/testing/data/W39.py similarity index 100% rename from testsuite/W39.py rename to testing/data/W39.py diff --git a/testing/data/W60.py b/testing/data/W60.py new file mode 100644 index 000000000..cf719b4df --- /dev/null +++ b/testing/data/W60.py @@ -0,0 +1,33 @@ +#: W605:1:10 +regex = '\.png$' +#: W605:2:1 +regex = ''' +\.png$ +''' +#: W605:2:6 +f( + '\_' +) +#: W605:4:6 +""" +multi-line +literal +with \_ somewhere +in the middle +""" +#: W605:1:3 +f"\d" +#: Okay +regex = r'\.png$' +regex = '\\.png$' +regex = r''' +\.png$ +''' +regex = r''' +\\.png$ +''' +s = '\\' +regex = '\w' # noqa +regex = ''' +\w +''' # noqa diff --git a/testsuite/crlf.py b/testing/data/crlf.py similarity index 100% rename from testsuite/crlf.py rename to testing/data/crlf.py diff --git a/testsuite/latin-1.py b/testing/data/latin-1.py similarity index 100% rename from testsuite/latin-1.py rename to testing/data/latin-1.py diff --git a/testsuite/noqa.py b/testing/data/noqa.py similarity index 75% rename from testsuite/noqa.py rename to testing/data/noqa.py index 02fdd4f82..3d02492e5 100644 --- a/testsuite/noqa.py +++ b/testing/data/noqa.py @@ -12,4 +12,10 @@ a = 1 if a == None: # noqa pass + +# should silence E501 +s = f''' +loong {y} 
looooooooooooooong loooooooooooooong looooooooong loooooooong looooooooong +{x} +''' # noqa #: diff --git a/testsuite/python3.py b/testing/data/python3.py similarity index 100% rename from testsuite/python3.py rename to testing/data/python3.py diff --git a/testsuite/python310.py b/testing/data/python310.py similarity index 100% rename from testsuite/python310.py rename to testing/data/python310.py diff --git a/testing/data/python311.py b/testing/data/python311.py new file mode 100644 index 000000000..a405125a9 --- /dev/null +++ b/testing/data/python311.py @@ -0,0 +1,23 @@ +#: Okay +try: + ... +except* OSError as e: + pass +#: Okay +from typing import Generic +from typing import TypeVarTuple + + +Ts = TypeVarTuple('Ts') + + +class Shape(Generic[*Ts]): + pass + + +def f(*args: *Ts) -> None: + ... + + +def g(x: Shape[*Ts]) -> Shape[*Ts]: + ... diff --git a/testing/data/python312.py b/testing/data/python312.py new file mode 100644 index 000000000..aabb6a426 --- /dev/null +++ b/testing/data/python312.py @@ -0,0 +1,31 @@ +#: Okay +# https://github.com/python/cpython/issues/90432: fixed in 3.12 +def foo(): + pas + +\ + +def bar(): + pass +#: Okay +# new type aliases +type X = int | str +type Y[T] = list[T] +type Z[T: str] = list[T] +#: Okay +# new generics +def f[T](x: T) -> T: + pass + + +def g[T: str, U: int](x: T, y: U) -> dict[T, U]: + pass +#: Okay +# new nested f-strings +f'{ + thing +} {f'{other} {thing}'}' +#: E201:1:4 E202:1:17 +f'{ an_error_now }' +#: Okay +f'{x:02x}' diff --git a/testing/data/python313.py b/testing/data/python313.py new file mode 100644 index 000000000..ae70e427b --- /dev/null +++ b/testing/data/python313.py @@ -0,0 +1,22 @@ +type Alias[T: (int, str) = str] = list[T] +type Alias2[T = str] = list[T] + + +class C[T: (int, str) = str]: + pass + + +class C2[T = str]: + pass + + +class C3[T, U: str = str]: + pass + + +def f[T: (int, str) = str](t: T) -> T: + pass + + +def f2[T = str](t: T) -> T: + pass diff --git a/testing/data/python314.py 
b/testing/data/python314.py new file mode 100644 index 000000000..6f96df7fa --- /dev/null +++ b/testing/data/python314.py @@ -0,0 +1,19 @@ +#: Okay +try: + raise AssertionError('hi') +except AssertionError, ValueError: + pass + +t'hello {world}' +t'{hello}:{world}' +t'in{x}' +t'hello{world=}' +#: Okay +# new nested f-strings +t'{ + thing +} {t'{other} {thing}'}' +#: E201:1:4 E202:1:17 +t'{ an_error_now }' +#: Okay +t'{x:02x}' diff --git a/testsuite/python35.py b/testing/data/python35.py similarity index 100% rename from testsuite/python35.py rename to testing/data/python35.py diff --git a/testing/data/python36.py b/testing/data/python36.py new file mode 100644 index 000000000..aefd6540c --- /dev/null +++ b/testing/data/python36.py @@ -0,0 +1,3 @@ +#: Okay +f'{hello}:{world}' +f'in{x}' diff --git a/testsuite/python38.py b/testing/data/python38.py similarity index 89% rename from testsuite/python38.py rename to testing/data/python38.py index 8bf0d4d0e..44737fedc 100644 --- a/testsuite/python38.py +++ b/testing/data/python38.py @@ -53,3 +53,11 @@ def f3( #: E221:1:6 E221:1:19 if (x := 1) == (y := 2): pass +#: E741 +while l := 1: + pass +#: E741 +if (l := 1): + pass +#: Okay +f'{x=}' diff --git a/testing/data/python39.py b/testing/data/python39.py new file mode 100644 index 000000000..723f0e1ce --- /dev/null +++ b/testing/data/python39.py @@ -0,0 +1,2 @@ +#: W292:1:70 noeol +# This line doesn't have a linefeed (in 3.8 this is reported thrice!) 
diff --git a/testsuite/utf-8-bom.py b/testing/data/utf-8-bom.py similarity index 100% rename from testsuite/utf-8-bom.py rename to testing/data/utf-8-bom.py diff --git a/testsuite/utf-8.py b/testing/data/utf-8.py similarity index 100% rename from testsuite/utf-8.py rename to testing/data/utf-8.py diff --git a/testing/support.py b/testing/support.py new file mode 100644 index 000000000..20a0eab83 --- /dev/null +++ b/testing/support.py @@ -0,0 +1,36 @@ +from __future__ import annotations + +import os.path + +from pycodestyle import BaseReport +from pycodestyle import StyleGuide + +ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + + +class InMemoryReport(BaseReport): + """ + Collect the results in memory, without printing anything. + """ + + def __init__(self, options): + super().__init__(options) + self.in_memory_errors = [] + + def error(self, line_number, offset, text, check): + """ + Report an error, according to options. + """ + code = text[:4] + self.in_memory_errors.append(f'{code}:{line_number}:{offset + 1}') + return super().error(line_number, offset, text, check) + + +def errors_from_src(src: str) -> list[str]: + guide = StyleGuide(select=('E', 'W'), max_doc_length=72) + reporter = guide.init_report(InMemoryReport) + guide.input_file( + filename='in-memory-test-file.py', + lines=src.splitlines(True), + ) + return reporter.in_memory_errors diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/test_E101.py b/tests/test_E101.py new file mode 100644 index 000000000..e3f2e5d00 --- /dev/null +++ b/tests/test_E101.py @@ -0,0 +1,18 @@ +"""moved from data files due to 3.12 making this a TokenError""" +import sys +import unittest + +from testing.support import errors_from_src + + +class E101Test(unittest.TestCase): + def test_E101(self): + errors = errors_from_src( + 'if True:\n' + '\tprint(1) # tabs\n' + ' print(2) # spaces\n' + ) + if sys.version_info >= (3, 12): # pragma: >=3.12 
cover + self.assertEqual(errors, ['W191:2:1', 'E901:3:28']) + else: # pragma: <3.12 cover + self.assertEqual(errors, ['W191:2:1', 'E101:3:1']) diff --git a/tests/test_E901.py b/tests/test_E901.py new file mode 100644 index 000000000..caf68d503 --- /dev/null +++ b/tests/test_E901.py @@ -0,0 +1,29 @@ +"""moved from data files due to 3.12 changing syntax errors""" +import sys +import unittest + +from testing.support import errors_from_src + + +class E901Test(unittest.TestCase): + def test_closing_brace(self): + errors = errors_from_src('}\n') + if sys.version_info < (3, 12): # pragma: <3.12 cover + self.assertEqual(errors, ['E901:2:1']) + else: # pragma: >=3.12 cover + self.assertEqual(errors, []) + + def test_unclosed_brace(self): + src = '''\ +if msg: + errmsg = msg % progress.get(cr_dbname)) + +def lasting(self, duration=300): + progress = self._progress.setdefault('foo', {} +''' + errors = errors_from_src(src) + if sys.version_info < (3, 12): # pragma: <3.12 cover + expected = ['E122:4:1'] + else: # pragma: >=3.12 cover + expected = ['E122:4:1', 'E901:5:1'] # noqa: E501 + self.assertEqual(errors, expected) diff --git a/tests/test_all.py b/tests/test_all.py new file mode 100644 index 000000000..99a811839 --- /dev/null +++ b/tests/test_all.py @@ -0,0 +1,12 @@ +import os.path + +import pycodestyle +from testing.support import ROOT + + +def test_own_dog_food(): + style = pycodestyle.StyleGuide(select='E,W', quiet=True) + files = [pycodestyle.__file__, __file__, os.path.join(ROOT, 'setup.py')] + report = style.init_report(pycodestyle.StandardReport) + report = style.check_files(files) + assert list(report.messages) == ['W504'], f'Failures: {report.messages}' diff --git a/testsuite/test_api.py b/tests/test_api.py similarity index 86% rename from testsuite/test_api.py rename to tests/test_api.py index 8dde32ff1..50cb1b831 100644 --- a/testsuite/test_api.py +++ b/tests/test_api.py @@ -1,16 +1,16 @@ -# -*- coding: utf-8 -*- +import io import os.path import shlex import sys 
import unittest import pycodestyle -from testsuite.support import ROOT_DIR, PseudoFile +from testing.support import ROOT -E11 = os.path.join(ROOT_DIR, 'testsuite', 'E11.py') +E11 = os.path.join(ROOT, 'testing', 'data', 'E11.py') -class DummyChecker(object): +class DummyChecker: def __init__(self, tree, filename): pass @@ -26,8 +26,8 @@ def setUp(self): self._saved_stdout = sys.stdout self._saved_stderr = sys.stderr self._saved_checks = pycodestyle._checks - sys.stdout = PseudoFile() - sys.stderr = PseudoFile() + sys.stdout = io.StringIO() + sys.stderr = io.StringIO() pycodestyle._checks = { k: {f: (vals[0][:], vals[1]) for (f, vals) in v.items()} for k, v in self._saved_checks.items() @@ -39,7 +39,10 @@ def tearDown(self): pycodestyle._checks = self._saved_checks def reset(self): - del sys.stdout[:], sys.stderr[:] + sys.stdout.seek(0) + sys.stdout.truncate() + sys.stderr.seek(0) + sys.stderr.truncate() def test_register_physical_check(self): def check_dummy(physical_line, line_number): @@ -107,8 +110,8 @@ def check_dummy(logical, tokens): def test_styleguide(self): report = pycodestyle.StyleGuide().check_files() self.assertEqual(report.total_errors, 0) - self.assertFalse(sys.stdout) - self.assertFalse(sys.stderr) + self.assertFalse(sys.stdout.getvalue()) + self.assertFalse(sys.stderr.getvalue()) self.reset() report = pycodestyle.StyleGuide().check_files(['missing-file']) @@ -116,15 +119,15 @@ def test_styleguide(self): self.assertEqual(len(stdout), report.total_errors) self.assertEqual(report.total_errors, 1) # < 3.3 returns IOError; >= 3.3 returns FileNotFoundError - self.assertTrue(stdout[0].startswith("missing-file:1:1: E902 ")) - self.assertFalse(sys.stderr) + assert stdout[0].startswith("missing-file:1:1: E902 ") + self.assertFalse(sys.stderr.getvalue()) self.reset() report = pycodestyle.StyleGuide().check_files([E11]) stdout = sys.stdout.getvalue().splitlines() self.assertEqual(len(stdout), report.total_errors) self.assertEqual(report.total_errors, 24) - 
self.assertFalse(sys.stderr) + self.assertFalse(sys.stderr.getvalue()) self.reset() # Passing the paths in the constructor gives same result @@ -132,7 +135,7 @@ def test_styleguide(self): stdout = sys.stdout.getvalue().splitlines() self.assertEqual(len(stdout), report.total_errors) self.assertEqual(report.total_errors, 24) - self.assertFalse(sys.stderr) + self.assertFalse(sys.stderr.getvalue()) self.reset() def test_styleguide_options(self): @@ -148,10 +151,10 @@ def test_styleguide_options(self): # Check unset options for o in ('benchmark', 'config', 'count', 'diff', - 'doctest', 'quiet', 'show_pep8', 'show_source', - 'statistics', 'testsuite', 'verbose'): + 'quiet', 'show_pep8', 'show_source', + 'statistics', 'verbose'): oval = getattr(pep8style.options, o) - self.assertTrue(oval in (None, False), msg='%s = %r' % (o, oval)) + self.assertTrue(oval in (None, False), msg=f'{o} = {oval!r}') # Check default options self.assertTrue(pep8style.options.repeat) @@ -183,10 +186,6 @@ def parse_argv(argstring): ('E121', 'E123', 'E126', 'E226', 'E24', 'E704', 'W503', 'W504') ) - options = parse_argv('--doctest').options - self.assertEqual(options.select, ()) - self.assertEqual(options.ignore, ()) - options = parse_argv('--ignore E,W').options self.assertEqual(options.select, ()) self.assertEqual(options.ignore, ('E', 'W')) @@ -329,12 +328,18 @@ def test_check_nullbytes(self): count_errors = pep8style.input_file('stdin', lines=['\x00\n']) stdout = sys.stdout.getvalue() - expected = "stdin:1:1: E901 ValueError" - self.assertTrue(stdout.startswith(expected), - msg='Output %r does not start with %r' % - (stdout, expected)) - self.assertFalse(sys.stderr) - self.assertEqual(count_errors, 1) + if sys.version_info < (3, 11, 4): # pragma: <3.11 cover + expected = ["stdin:1:1: E901 ValueError: source code string cannot contain null bytes"] # noqa: E501 + elif sys.version_info < (3, 12): # pragma: <3.12 cover # pragma: >=3.11 cover # noqa: E501 + expected = ["stdin:1:1: E901 SyntaxError: 
source code string cannot contain null bytes"] # noqa: E501 + else: # pragma: >=3.12 cover + expected = [ + "stdin:1:1: E901 SyntaxError: source code string cannot contain null bytes", # noqa: E501 + "stdin:1:1: E901 TokenError: source code cannot contain null bytes", # noqa: E501 + ] + self.assertEqual(stdout.splitlines(), expected) + self.assertFalse(sys.stderr.getvalue()) + self.assertEqual(count_errors, len(expected)) def test_styleguide_unmatched_triple_quotes(self): pycodestyle.register_check(DummyChecker, ['Z701']) @@ -347,35 +352,17 @@ def test_styleguide_unmatched_triple_quotes(self): pep8style.input_file('stdin', lines=lines) stdout = sys.stdout.getvalue() - expected = 'stdin:2:5: E901 TokenError: EOF in multi-line string' - self.assertTrue(expected in stdout) - - def test_styleguide_continuation_line_outdented(self): - pycodestyle.register_check(DummyChecker, ['Z701']) - lines = [ - 'def foo():\n', - ' pass\n', - '\n', - '\\\n', - '\n', - 'def bar():\n', - ' pass\n', - ] - - pep8style = pycodestyle.StyleGuide() - count_errors = pep8style.input_file('stdin', lines=lines) - self.assertEqual(count_errors, 2) - stdout = sys.stdout.getvalue() - expected = ( - 'stdin:6:1: ' - 'E122 continuation line missing indentation or outdented' - ) - self.assertTrue(expected in stdout) - expected = 'stdin:6:1: E302 expected 2 blank lines, found 1' - self.assertTrue(expected in stdout) - - # TODO: runner - # TODO: input_file + if sys.version_info < (3, 12): # pragma: <3.12 cover # noqa: E501 + expected = [ + 'stdin:2:5: E901 TokenError: EOF in multi-line string', + 'stdin:2:6: E901 SyntaxError: unterminated triple-quoted string literal (detected at line 2)', # noqa: E501 + ] + else: # pragma: >=3.12 cover + expected = [ + 'stdin:2:6: E901 SyntaxError: unterminated triple-quoted string literal (detected at line 2)', # noqa: E501 + 'stdin:2:6: E901 TokenError: EOF in multi-line string', + ] + self.assertEqual(stdout.splitlines(), expected) def 
test_styleguides_other_indent_size(self): pycodestyle.register_check(DummyChecker, ['Z701']) diff --git a/testsuite/test_blank_lines.py b/tests/test_blank_lines.py similarity index 87% rename from testsuite/test_blank_lines.py rename to tests/test_blank_lines.py index e239f8b73..d1f78eae1 100644 --- a/testsuite/test_blank_lines.py +++ b/tests/test_blank_lines.py @@ -6,7 +6,7 @@ import unittest import pycodestyle -from testsuite.support import InMemoryReport +from testing.support import errors_from_src class BlankLinesTestCase(unittest.TestCase): @@ -14,18 +14,6 @@ class BlankLinesTestCase(unittest.TestCase): Common code for running blank_lines tests. """ - def check(self, content): - """ - Run checks on `content` and return the the list of errors. - """ - sut = pycodestyle.StyleGuide() - reporter = sut.init_report(InMemoryReport) - sut.input_file( - filename='in-memory-test-file.py', - lines=content.splitlines(True), - ) - return reporter.in_memory_errors - def assertNoErrors(self, actual): """ Check that the actual result from the checker has no errors. @@ -43,7 +31,7 @@ def test_initial_no_blank(self): """ It will accept no blank lines at the start of the file. """ - result = self.check("""def some_function(): + result = errors_from_src("""def some_function(): pass """) @@ -54,7 +42,7 @@ def test_initial_lines_one_blank(self): It will accept 1 blank lines before the first line of actual code, even if in other places it asks for 2 """ - result = self.check(""" + result = errors_from_src(""" def some_function(): pass """) @@ -66,7 +54,7 @@ def test_initial_lines_two_blanks(self): It will accept 2 blank lines before the first line of actual code, as normal. """ - result = self.check(""" + result = errors_from_src(""" def some_function(): pass @@ -79,7 +67,7 @@ def test_method_less_blank_lines(self): It will trigger an error when less than 1 blank lin is found before method definitions. """ - result = self.check("""# First comment line. 
+ result = errors_from_src("""# First comment line. class X: def a(): @@ -96,7 +84,7 @@ def test_method_less_blank_lines_comment(self): It will trigger an error when less than 1 blank lin is found before method definition, ignoring comments. """ - result = self.check("""# First comment line. + result = errors_from_src("""# First comment line. class X: def a(): @@ -114,7 +102,7 @@ def test_top_level_fewer_blank_lines(self): It will trigger an error when less 2 blank lines are found before top level definitions. """ - result = self.check("""# First comment line. + result = errors_from_src("""# First comment line. # Second line of comment. def some_function(): @@ -148,7 +136,7 @@ def test_top_level_more_blank_lines(self): It will trigger an error when more 2 blank lines are found before top level definitions. """ - result = self.check("""# First comment line. + result = errors_from_src("""# First comment line. # Second line of comment. @@ -179,7 +167,7 @@ def test_method_more_blank_lines(self): It will trigger an error when more than 1 blank line is found before method definition """ - result = self.check("""# First comment line. + result = errors_from_src("""# First comment line. class SomeCloseClass(object): @@ -211,7 +199,7 @@ def test_initial_lines_more_blank(self): It will trigger an error for more than 2 blank lines before the first line of actual code. """ - result = self.check(""" + result = errors_from_src(""" def some_function(): @@ -224,7 +212,7 @@ def test_blank_line_between_decorator(self): It will trigger an error when the decorator is followed by a blank line. """ - result = self.check("""# First line. + result = errors_from_src("""# First line. @some_decorator @@ -247,7 +235,7 @@ def test_blank_line_decorator(self): It will accept the decorators which are adjacent to the function and method definition. """ - result = self.check("""# First line. + result = errors_from_src("""# First line. 
@another_decorator @@ -269,7 +257,7 @@ def test_top_level_fewer_follow_lines(self): It will trigger an error when less than 2 blank lines are found between a top level definitions and other top level code. """ - result = self.check(""" + result = errors_from_src(""" def a(): print('Something') @@ -285,7 +273,7 @@ def test_top_level_fewer_follow_lines_comments(self): found between a top level definitions and other top level code, even if we have comments before """ - result = self.check(""" + result = errors_from_src(""" def a(): print('Something') @@ -306,7 +294,7 @@ def test_top_level_good_follow_lines(self): It not trigger an error when 2 blank lines are found between a top level definitions and other top level code. """ - result = self.check(""" + result = errors_from_src(""" def a(): print('Something') @@ -326,7 +314,7 @@ def test_method_fewer_follow_lines(self): It will trigger an error when less than 1 blank line is found between a method and previous definitions. """ - result = self.check(""" + result = errors_from_src(""" def a(): x = 1 def b(): @@ -342,7 +330,7 @@ def test_method_nested_fewer_follow_lines(self): found between a method and previous definitions, even when nested. """ - result = self.check(""" + result = errors_from_src(""" def a(): x = 2 @@ -361,7 +349,7 @@ def test_method_nested_less_class(self): between a method and previous definitions, even when used to define a class. """ - result = self.check(""" + result = errors_from_src(""" def a(): x = 1 class C: @@ -377,7 +365,7 @@ def test_method_nested_ok(self): found between a method and previous definitions, even when nested. """ - result = self.check(""" + result = errors_from_src(""" def a(): x = 2 @@ -412,7 +400,7 @@ def test_initial_lines_one_blanks(self): It will accept less than 3 blank lines before the first line of actual code. 
""" - result = self.check(""" + result = errors_from_src(""" def some_function(): @@ -426,7 +414,7 @@ def test_initial_lines_tree_blanks(self): It will accept 3 blank lines before the first line of actual code, as normal. """ - result = self.check(""" + result = errors_from_src(""" def some_function(): @@ -440,7 +428,7 @@ def test_top_level_fewer_blank_lines(self): It will trigger an error when less 3 blank lines are found before top level definitions. """ - result = self.check("""# First comment line. + result = errors_from_src("""# First comment line. # Second line of comment. @@ -479,7 +467,7 @@ def test_top_level_more_blank_lines(self): It will trigger an error when more 2 blank lines are found before top level definitions. """ - result = self.check("""# First comment line. + result = errors_from_src("""# First comment line. # Second line of comment. @@ -513,7 +501,7 @@ def test_the_right_blanks(self): """ It will accept 3 blank for top level and 2 for nested. """ - result = self.check(""" + result = errors_from_src(""" def some_function(): diff --git a/tests/test_data.py b/tests/test_data.py new file mode 100644 index 000000000..80a5529b5 --- /dev/null +++ b/tests/test_data.py @@ -0,0 +1,95 @@ +from __future__ import annotations + +import collections +import os.path +import re +import sys + +import pytest + +import pycodestyle +from testing.support import errors_from_src +from testing.support import ROOT + +PY_RE = re.compile(r'^python(\d)(\d*)\.py$') +CASE_RE = re.compile('^(#:.*\n)', re.MULTILINE) + + +def _nsort(items: list[str]) -> list[str]: + return sorted( + items, + key=lambda s: [ + int(part) if part.isdigit() else part.lower() + for part in re.split(r'(\d+)', s) + ], + ) + + +def get_tests(): + ret = [] + for fname in _nsort(os.listdir(os.path.join(ROOT, 'testing', 'data'))): + match = PY_RE.match(fname) + if match is not None: + major, minor = int(match[1]), int(match[2] or '0') + mark = pytest.mark.skipif( + sys.version_info < (major, minor), + 
reason=f'requires Python {major}.{minor}', + ) + else: + mark = () + + fname = os.path.join('testing', 'data', fname) + fname_full = os.path.join(ROOT, fname) + src = ''.join(pycodestyle.readlines(fname_full)) + + line = 1 + parts_it = iter(CASE_RE.split(src)) + # the first case will not have a comment for it + s = next(parts_it) + if s.strip(): + id_s = f'{fname}:{line}' + ret.append(pytest.param('#: Okay', s, id=id_s, marks=mark)) + line += s.count('\n') + + for comment, s in zip(parts_it, parts_it): + if s.strip(): + id_s = f'{fname}:{line}' + ret.append(pytest.param(comment, s, id=id_s, marks=mark)) + line += s.count('\n') + 1 + + assert ret + return ret + + +@pytest.mark.parametrize(('case', 's'), get_tests()) +def test(case, s): + codes = collections.Counter() + exact = collections.Counter() + + assert case.startswith('#:') + for code in case[2:].strip().split(): + if code == 'Okay': + continue + elif code == 'noeol': + s = s.rstrip('\n') + elif ':' in code: + exact[code] += 1 + else: + codes[code] += 1 + + unexpected = collections.Counter() + for code in errors_from_src(s): + if exact[code]: + exact[code] -= 1 + elif codes[code[:4]]: + codes[code[:4]] -= 1 + else: # pragma: no cover + unexpected[code] += 1 + + messages = ( + *(f'-{k}\n' for k, v in codes.items() for _ in range(v)), + *(f'-{k}\n' for k, v in exact.items() for _ in range(v)), + *(f'+{k}\n' for k, v in unexpected.items() for _ in range(v)), + ) + if messages: # pragma: no cover + raise AssertionError(f'unexpected codes!\n{"".join(messages)}') diff --git a/testsuite/test_parser.py b/tests/test_parser.py similarity index 100% rename from testsuite/test_parser.py rename to tests/test_parser.py diff --git a/tests/test_pycodestyle.py b/tests/test_pycodestyle.py new file mode 100644 index 000000000..444d59f0f --- /dev/null +++ b/tests/test_pycodestyle.py @@ -0,0 +1,48 @@ +import io +import sys +import tokenize + +import pytest + +from pycodestyle import Checker +from pycodestyle import expand_indent 
+from pycodestyle import mute_string + + +@pytest.mark.parametrize( + ('s', 'expected'), + ( + (' ', 4), + ('\t', 8), + (' \t', 8), + (' \t', 16), + ), +) +def test_expand_indent(s, expected): + assert expand_indent(s) == expected + + +@pytest.mark.parametrize( + ('s', 'expected'), + ( + ('"abc"', '"xxx"'), + ("'''abc'''", "'''xxx'''"), + ("r'abc'", "r'xxx'"), + ), +) +def test_mute_string(s, expected): + assert mute_string(s) == expected + + +def test_fstring_logical_line(): + src = '''\ +f'hello {{ {thing} }} world' +''' + checker = Checker(lines=src.splitlines()) + checker.tokens = list(tokenize.generate_tokens(io.StringIO(src).readline)) + checker.build_tokens_line() + + if sys.version_info >= (3, 12): # pragma: >3.12 cover + assert checker.logical_line == "f'xxxxxxxxx{thing}xxxxxxxxx'" + else: + assert checker.logical_line == "f'xxxxxxxxxxxxxxxxxxxxxxxxx'" diff --git a/tests/test_self_doctests.py b/tests/test_self_doctests.py new file mode 100644 index 000000000..e2f10ba6e --- /dev/null +++ b/tests/test_self_doctests.py @@ -0,0 +1,38 @@ +import re + +import pytest + +import pycodestyle +from testing.support import errors_from_src + +SELFTEST_REGEX = re.compile(r'\b(Okay|[EW]\d{3}): (.*)') + + +def get_tests(): + ret = [ + pytest.param( + match[1], + match[2], + id=f'pycodestyle.py:{f.__code__.co_firstlineno}:{f.__name__}@{i}', + ) + for group in pycodestyle._checks.values() + for f in group + if f.__doc__ is not None + for i, match in enumerate(SELFTEST_REGEX.finditer(f.__doc__)) + ] + assert ret + return tuple(ret) + + +@pytest.mark.parametrize(('expected', 's'), get_tests()) +def test(expected, s): + s = '\n'.join((*s.replace(r'\t', '\t').split(r'\n'), '')) + errors = errors_from_src(s) + if expected == 'Okay': + assert errors == [] + else: + for error in errors: + if error.startswith(f'{expected}:'): + break + else: + raise AssertionError(f'expected {expected} from {s!r}') diff --git a/testsuite/test_shell.py b/tests/test_shell.py similarity index 83% 
rename from testsuite/test_shell.py rename to tests/test_shell.py index 059d88238..e4a75e1c1 100644 --- a/testsuite/test_shell.py +++ b/tests/test_shell.py @@ -1,10 +1,11 @@ -# -*- coding: utf-8 -*- +import configparser +import io import os.path import sys import unittest import pycodestyle -from testsuite.support import ROOT_DIR, PseudoFile +from testing.support import ROOT class ShellTestCase(unittest.TestCase): @@ -15,17 +16,17 @@ def setUp(self): self._saved_stdout = sys.stdout self._saved_stderr = sys.stderr self._saved_pconfig = pycodestyle.PROJECT_CONFIG - self._saved_cpread = pycodestyle.RawConfigParser._read + self._saved_cpread = configparser.RawConfigParser._read self._saved_stdin_get_value = pycodestyle.stdin_get_value self._config_filenames = [] self.stdin = '' sys.argv = ['pycodestyle'] - sys.stdout = PseudoFile() - sys.stderr = PseudoFile() + sys.stdout = io.StringIO() + sys.stderr = io.StringIO() def fake_config_parser_read(cp, fp, filename): self._config_filenames.append(filename) - pycodestyle.RawConfigParser._read = fake_config_parser_read + configparser.RawConfigParser._read = fake_config_parser_read pycodestyle.stdin_get_value = self.stdin_get_value def tearDown(self): @@ -33,14 +34,17 @@ def tearDown(self): sys.stdout = self._saved_stdout sys.stderr = self._saved_stderr pycodestyle.PROJECT_CONFIG = self._saved_pconfig - pycodestyle.RawConfigParser._read = self._saved_cpread + configparser.RawConfigParser._read = self._saved_cpread pycodestyle.stdin_get_value = self._saved_stdin_get_value def stdin_get_value(self): return self.stdin def pycodestyle(self, *args): - del sys.stdout[:], sys.stderr[:] + sys.stdout.seek(0) + sys.stdout.truncate() + sys.stderr.seek(0) + sys.stderr.truncate() sys.argv[1:] = args try: pycodestyle._main() @@ -72,7 +76,7 @@ def test_print_usage(self): self.assertFalse(self._config_filenames) def test_check_simple(self): - E11 = os.path.join(ROOT_DIR, 'testsuite', 'E11.py') + E11 = os.path.join(ROOT, 'testing', 'data', 
'E11.py') stdout, stderr, errcode = self.pycodestyle(E11) stdout = stdout.splitlines() self.assertEqual(errcode, 1) @@ -124,8 +128,8 @@ def test_check_noarg(self): def test_check_diff(self): pycodestyle.PROJECT_CONFIG = () diff_lines = [ - "--- testsuite/E11.py 2006-06-01 08:49:50 +0500", - "+++ testsuite/E11.py 2008-04-06 17:36:29 +0500", + "--- testing/data/E11.py 2006-06-01 08:49:50 +0500", + "+++ testing/data/E11.py 2008-04-06 17:36:29 +0500", "@@ -2,4 +2,7 @@", " if x > 2:", " print x", @@ -148,8 +152,10 @@ def test_check_diff(self): self.assertEqual(y, str(col)) self.assertTrue(msg.startswith(' E11')) - diff_lines[:2] = ["--- a/testsuite/E11.py 2006-06-01 08:49 +0400", - "+++ b/testsuite/E11.py 2008-04-06 17:36 +0400"] + diff_lines[:2] = [ + "--- a/testing/data/E11.py 2006-06-01 08:49 +0400", + "+++ b/testing/data/E11.py 2008-04-06 17:36 +0400", + ] self.stdin = '\n'.join(diff_lines) stdout, stderr, errcode = self.pycodestyle('--diff') stdout = stdout.splitlines() @@ -162,19 +168,21 @@ def test_check_diff(self): self.assertTrue(msg.startswith(' E11')) # issue #127, #137: one-line chunks - diff_lines[:-1] = ["diff --git a/testsuite/E11.py b/testsuite/E11.py", - "index 8735e25..2ecb529 100644", - "--- a/testsuite/E11.py", - "+++ b/testsuite/E11.py", - "@@ -5,0 +6 @@ if True:", - "+ print"] + diff_lines[:-1] = [ + "diff --git a/testing/data/E11.py b/testing/data/E11.py", + "index 8735e25..2ecb529 100644", + "--- a/testing/data/E11.py", + "+++ b/testing/data/E11.py", + "@@ -5,0 +6 @@ if True:", + "+ print", + ] self.stdin = '\n'.join(diff_lines) stdout, stderr, errcode = self.pycodestyle('--diff') stdout = stdout.splitlines() self.assertEqual(errcode, 1) self.assertFalse(stderr) - self.assertTrue('testsuite/E11.py:6:6: E111 ' in stdout[0]) - self.assertTrue('testsuite/E11.py:6:6: E117 ' in stdout[1]) + self.assertTrue('testing/data/E11.py:6:6: E111 ' in stdout[0]) + self.assertTrue('testing/data/E11.py:6:6: E117 ' in stdout[1]) # missing '--diff' self.stdin = 
'\n'.join(diff_lines) @@ -186,7 +194,7 @@ def test_check_diff(self): )) # no matching file in the diff - diff_lines[3] = "+++ b/testsuite/lost/E11.py" + diff_lines[3] = "+++ b/testing/lost/E11.py" self.stdin = '\n'.join(diff_lines) stdout, stderr, errcode = self.pycodestyle('--diff') self.assertFalse(errcode) diff --git a/testsuite/test_util.py b/tests/test_util.py similarity index 94% rename from testsuite/test_util.py rename to tests/test_util.py index 075b163c9..ce3058a92 100644 --- a/testsuite/test_util.py +++ b/tests/test_util.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- import os import unittest diff --git a/testsuite/W60.py b/testsuite/W60.py deleted file mode 100644 index 5003677d4..000000000 --- a/testsuite/W60.py +++ /dev/null @@ -1,98 +0,0 @@ -#: W601 -if a.has_key("b"): - print a -#: W602 -raise DummyError, "Message" -#: W602 -raise ValueError, "hello %s %s" % (1, 2) -#: Okay -raise type_, val, tb -raise Exception, Exception("f"), t -#: W603 -if x <> 0: - x = 0 -#: W604 -val = `1 + 2` -#: W605:1:10 -regex = '\.png$' -#: W605:2:1 -regex = ''' -\.png$ -''' -#: W605:2:6 -f( - '\_' -) -#: W605:4:6 -""" -multi-line -literal -with \_ somewhere -in the middle -""" -#: Okay -regex = r'\.png$' -regex = '\\.png$' -regex = r''' -\.png$ -''' -regex = r''' -\\.png$ -''' -s = '\\' -regex = '\w' # noqa -regex = ''' -\w -''' # noqa -#: W606 -async = 42 -#: W606 -await = 42 -#: W606 -await 42 -#: W606 -await 'test' -#: W606 -def async(): - pass -#: W606 -def await(): - pass -#: W606 -class async: - pass -#: W606 -class await: - pass -#: Okay -async def read_data(db): - data = await db.fetch('SELECT ...') -#: Okay -if await fut: - pass -if (await fut): - pass -if await fut + 1: - pass -if (await fut) + 1: - pass -pair = await fut, 'spam' -pair = (await fut), 'spam' -with await fut, open(): - pass -with (await fut), open(): - pass -await foo()['spam'].baz()() -return await coro() -return (await coro()) -res = await coro() ** 2 -res = (await coro()) 
** 2 -func(a1=await coro(), a2=0) -func(a1=(await coro()), a2=0) -await foo() + await bar() -(await foo()) + (await bar()) --await foo() --(await foo()) -(await - foo()) -await(await foo()) diff --git a/testsuite/support.py b/testsuite/support.py deleted file mode 100644 index eb8b4436f..000000000 --- a/testsuite/support.py +++ /dev/null @@ -1,224 +0,0 @@ -# -*- coding: utf-8 -*- -import os.path -import re -import sys - -from pycodestyle import Checker, BaseReport, StandardReport, readlines - -SELFTEST_REGEX = re.compile(r'\b(Okay|[EW]\d{3}):\s(.*)') -ROOT_DIR = os.path.dirname(os.path.dirname(__file__)) - - -class PseudoFile(list): - """Simplified file interface.""" - write = list.append - - def getvalue(self): - return ''.join(self) - - def flush(self): - pass - - -class TestReport(StandardReport): - """Collect the results for the tests.""" - - def __init__(self, options): - options.benchmark_keys += ['test cases', 'failed tests'] - super(TestReport, self).__init__(options) - self._verbose = options.verbose - - def error(self, line_number, offset, text, check): - """Report an error, according to options.""" - code = text[:4] - if code in self.counters: - self.counters[code] += 1 - else: - self.counters[code] = 1 - detailed_code = '%s:%s:%s' % (code, line_number, offset + 1) - # Don't care about expected errors or warnings - if code in self.expected or detailed_code in self.expected: - return - self._deferred_print.append( - (line_number, offset, detailed_code, text[5:], check.__doc__)) - self.file_errors += 1 - self.total_errors += 1 - return code - - def get_file_results(self): - # Check if the expected errors were found - label = '%s:%s:1' % (self.filename, self.line_offset) - for extended_code in self.expected: - code = extended_code.split(':')[0] - if not self.counters.get(code): - self.file_errors += 1 - self.total_errors += 1 - print('%s: error %s not found' % (label, extended_code)) - else: - self.counters[code] -= 1 - for code, extra in 
sorted(self.counters.items()): - if code not in self._benchmark_keys: - if extra and code in self.expected: - self.file_errors += 1 - self.total_errors += 1 - print('%s: error %s found too many times (+%d)' % - (label, code, extra)) - # Reset counters - del self.counters[code] - if self._verbose and not self.file_errors: - print('%s: passed (%s)' % - (label, ' '.join(self.expected) or 'Okay')) - self.counters['test cases'] += 1 - if self.file_errors: - self.counters['failed tests'] += 1 - return super(TestReport, self).get_file_results() - - def print_results(self): - results = ("%(physical lines)d lines tested: %(files)d files, " - "%(test cases)d test cases%%s." % self.counters) - if self.total_errors: - print(results % ", %s failures" % self.total_errors) - else: - print(results % "") - print("Test failed." if self.total_errors else "Test passed.") - - -class InMemoryReport(BaseReport): - """ - Collect the results in memory, without printing anything. - """ - - def __init__(self, options): - super(InMemoryReport, self).__init__(options) - self.in_memory_errors = [] - - def error(self, line_number, offset, text, check): - """ - Report an error, according to options. - """ - code = text[:4] - self.in_memory_errors.append('%s:%s:%s' % ( - code, line_number, offset + 1)) - return super(InMemoryReport, self).error( - line_number, offset, text, check) - - -def selftest(options): - """ - Test all check functions with test cases in docstrings. 
- """ - count_failed = count_all = 0 - report = BaseReport(options) - counters = report.counters - checks = options.physical_checks + options.logical_checks - for name, check, argument_names in checks: - for line in check.__doc__.splitlines(): - line = line.lstrip() - match = SELFTEST_REGEX.match(line) - if match is None: - continue - code, source = match.groups() - lines = [part.replace(r'\t', '\t') + '\n' - for part in source.split(r'\n')] - checker = Checker(lines=lines, options=options, report=report) - checker.check_all() - error = None - if code == 'Okay': - if len(counters) > len(options.benchmark_keys): - codes = [key for key in counters - if key not in options.benchmark_keys] - error = "incorrectly found %s" % ', '.join(codes) - elif not counters.get(code): - error = "failed to find %s" % code - # Keep showing errors for multiple tests - for key in set(counters) - set(options.benchmark_keys): - del counters[key] - count_all += 1 - if not error: - if options.verbose: - print("%s: %s" % (code, source)) - else: - count_failed += 1 - print("pycodestyle.py: %s:" % error) - for line in checker.lines: - print(line.rstrip()) - return count_failed, count_all - - -def init_tests(pep8style): - """ - Initialize testing framework. - - A test file can provide many tests. Each test starts with a - declaration. This declaration is a single line starting with '#:'. - It declares codes of expected failures, separated by spaces or - 'Okay' if no failure is expected. - If the file does not contain such declaration, it should pass all - tests. If the declaration is empty, following lines are not - checked, until next declaration. 
- - Examples: - - * Only E224 and W701 are expected: #: E224 W701 - * Following example is conform: #: Okay - * Don't check these lines: #: - """ - report = pep8style.init_report(TestReport) - runner = pep8style.input_file - - def run_tests(filename): - """Run all the tests from a file.""" - # Skip tests meant for higher versions of python - ver_match = re.search(r'python(\d)(\d+)?\.py$', filename) - if ver_match: - test_against_version = tuple(int(val or 0) - for val in ver_match.groups()) - if sys.version_info < test_against_version: - return - lines = readlines(filename) + ['#:\n'] - line_offset = 0 - codes = ['Okay'] - testcase = [] - count_files = report.counters['files'] - for index, line in enumerate(lines): - if not line.startswith('#:'): - if codes: - # Collect the lines of the test case - testcase.append(line) - continue - if codes and index: - if 'noeol' in codes: - testcase[-1] = testcase[-1].rstrip('\n') - codes = [c for c in codes - if c not in ('Okay', 'noeol')] - # Run the checker - runner(filename, testcase, expected=codes, - line_offset=line_offset) - # output the real line numbers - line_offset = index + 1 - # configure the expected errors - codes = line.split()[1:] - # empty the test case buffer - del testcase[:] - report.counters['files'] = count_files + 1 - return report.counters['failed tests'] - - pep8style.runner = run_tests - - -def run_tests(style): - options = style.options - if options.doctest: - import doctest - fail_d, done_d = doctest.testmod(report=False, verbose=options.verbose) - fail_s, done_s = selftest(options) - count_failed = fail_s + fail_d - if not options.quiet: - count_passed = done_d + done_s - count_failed - print("%d passed and %d failed." % (count_passed, count_failed)) - print("Test failed." 
if count_failed else "Test passed.") - if count_failed: - sys.exit(1) - if options.testsuite: - init_tests(style) - return style.check_files() diff --git a/testsuite/test_all.py b/testsuite/test_all.py deleted file mode 100644 index 38b3c452d..000000000 --- a/testsuite/test_all.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -import os.path -import sys -import unittest - -import pycodestyle -from testsuite.support import init_tests, selftest, ROOT_DIR - - -class PycodestyleTestCase(unittest.TestCase): - """Test the standard errors and warnings (E and W).""" - - def setUp(self): - self._style = pycodestyle.StyleGuide( - paths=[os.path.join(ROOT_DIR, 'testsuite')], - select='E,W', quiet=True) - - def test_doctest(self): - import doctest - fail_d, done_d = doctest.testmod( - pycodestyle, verbose=False, report=False - ) - self.assertTrue(done_d, msg='tests not found') - self.assertFalse(fail_d, msg='%s failure(s)' % fail_d) - - def test_selftest(self): - fail_s, done_s = selftest(self._style.options) - self.assertTrue(done_s, msg='tests not found') - self.assertFalse(fail_s, msg='%s failure(s)' % fail_s) - - def test_checkers_testsuite(self): - init_tests(self._style) - report = self._style.check_files() - self.assertFalse(report.total_errors, - msg='%s failure(s)' % report.total_errors) - - def test_own_dog_food(self): - files = [pycodestyle.__file__.rstrip('oc'), __file__.rstrip('oc'), - os.path.join(ROOT_DIR, 'setup.py')] - report = self._style.init_report(pycodestyle.StandardReport) - report = self._style.check_files(files) - self.assertEqual(list(report.messages.keys()), ['W504'], - msg='Failures: %s' % report.messages) diff --git a/tox.ini b/tox.ini index 18f1a4ff4..c9b9255f3 100644 --- a/tox.ini +++ b/tox.ini @@ -4,17 +4,17 @@ # and then run "tox" from this directory. 
[tox] -envlist = py36, py37, py38, py39, py310, pypy3 +envlist = py, pypy3 skip_missing_interpreters = True [testenv] -deps = coverage +deps = + covdefaults + coverage + pytest commands = python -m pycodestyle --statistics pycodestyle.py - coverage run -m pycodestyle --max-doc-length=72 --testsuite testsuite - coverage run -m pycodestyle --max-doc-length=72 --doctest - coverage run -m unittest discover testsuite -vv - coverage combine + coverage run -m pytest tests coverage report [testenv:flake8]