pre-commit-hooks: python3.6+

This commit is contained in:
Anthony Sottile 2020-02-05 11:10:42 -08:00
parent 551d1a07b3
commit f5c42a050b
60 changed files with 291 additions and 493 deletions

View File

@ -28,17 +28,22 @@ repos:
rev: v1.9.0
hooks:
- id: reorder-python-imports
language_version: python3
args: [--py3-plus]
- repo: https://github.com/asottile/pyupgrade
rev: v1.26.2
hooks:
- id: pyupgrade
args: [--py36-plus]
- repo: https://github.com/asottile/add-trailing-comma
rev: v1.5.0
hooks:
- id: add-trailing-comma
args: [--py36-plus]
- repo: https://github.com/asottile/setup-cfg-fmt
rev: v1.6.0
hooks:
- id: setup-cfg-fmt
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v0.761
hooks:
- id: mypy
language_version: python3

View File

@ -16,9 +16,9 @@ jobs:
- template: job--pre-commit.yml@asottile
- template: job--python-tox.yml@asottile
parameters:
toxenvs: [py27, py37]
toxenvs: [py38]
os: windows
- template: job--python-tox.yml@asottile
parameters:
toxenvs: [pypy, pypy3, py27, py36, py37]
toxenvs: [pypy3, py36, py37, py38]
os: linux

View File

@ -1,9 +1,4 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
def main(): # type: () -> int
def main() -> int:
raise SystemExit(
'autopep8-wrapper is deprecated. Instead use autopep8 directly via '
'https://github.com/pre-commit/mirrors-autopep8',

View File

@ -1,13 +1,7 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import json
import math
import os
from typing import Iterable
from typing import Optional
from typing import Sequence
from typing import Set
@ -17,7 +11,7 @@ from pre_commit_hooks.util import CalledProcessError
from pre_commit_hooks.util import cmd_output
def lfs_files(): # type: () -> Set[str]
def lfs_files() -> Set[str]:
try:
# Introduced in git-lfs 2.2.0, first working in 2.2.1
lfs_ret = cmd_output('git', 'lfs', 'status', '--json')
@ -27,23 +21,20 @@ def lfs_files(): # type: () -> Set[str]
return set(json.loads(lfs_ret)['files'])
def find_large_added_files(filenames, maxkb):
# type: (Iterable[str], int) -> int
def find_large_added_files(filenames: Sequence[str], maxkb: int) -> int:
# Find all added files that are also in the list of files pre-commit tells
# us about
filenames = (added_files() & set(filenames)) - lfs_files()
retv = 0
for filename in filenames:
for filename in (added_files() & set(filenames)) - lfs_files():
kb = int(math.ceil(os.stat(filename).st_size / 1024))
if kb > maxkb:
print('{} ({} KB) exceeds {} KB.'.format(filename, kb, maxkb))
print(f'{filename} ({kb} KB) exceeds {maxkb} KB.')
retv = 1
return retv
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument(
'filenames', nargs='*',

View File

@ -1,7 +1,3 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import ast
import platform
@ -11,7 +7,7 @@ from typing import Optional
from typing import Sequence
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*')
args = parser.parse_args(argv)
@ -23,15 +19,11 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
with open(filename, 'rb') as f:
ast.parse(f.read(), filename=filename)
except SyntaxError:
print(
'{}: failed parsing with {} {}:'.format(
filename,
platform.python_implementation(),
sys.version.partition(' ')[0],
),
)
impl = platform.python_implementation()
version = sys.version.split()[0]
print(f'{filename}: failed parsing with {impl} {version}:')
tb = ' ' + traceback.format_exc().replace('\n', '\n ')
print('\n{}'.format(tb))
print(f'\n{tb}')
retval = 1
return retval

View File

@ -1,10 +1,7 @@
from __future__ import unicode_literals
import argparse
import ast
import collections
import sys
from typing import List
from typing import NamedTuple
from typing import Optional
from typing import Sequence
from typing import Set
@ -21,23 +18,26 @@ BUILTIN_TYPES = {
}
Call = collections.namedtuple('Call', ['name', 'line', 'column'])
class Call(NamedTuple):
name: str
line: int
column: int
class Visitor(ast.NodeVisitor):
def __init__(self, ignore=None, allow_dict_kwargs=True):
# type: (Optional[Sequence[str]], bool) -> None
self.builtin_type_calls = [] # type: List[Call]
def __init__(
self,
ignore: Optional[Sequence[str]] = None,
allow_dict_kwargs: bool = True,
) -> None:
self.builtin_type_calls: List[Call] = []
self.ignore = set(ignore) if ignore else set()
self.allow_dict_kwargs = allow_dict_kwargs
def _check_dict_call(self, node): # type: (ast.Call) -> bool
return (
self.allow_dict_kwargs and
(getattr(node, 'kwargs', None) or getattr(node, 'keywords', None))
)
def _check_dict_call(self, node: ast.Call) -> bool:
return self.allow_dict_kwargs and bool(node.keywords)
def visit_Call(self, node): # type: (ast.Call) -> None
def visit_Call(self, node: ast.Call) -> None:
if not isinstance(node.func, ast.Name):
# Ignore functions that are object attributes (`foo.bar()`).
# Assume that if the user calls `builtins.list()`, they know what
@ -54,8 +54,11 @@ class Visitor(ast.NodeVisitor):
)
def check_file(filename, ignore=None, allow_dict_kwargs=True):
# type: (str, Optional[Sequence[str]], bool) -> List[Call]
def check_file(
filename: str,
ignore: Optional[Sequence[str]] = None,
allow_dict_kwargs: bool = True,
) -> List[Call]:
with open(filename, 'rb') as f:
tree = ast.parse(f.read(), filename=filename)
visitor = Visitor(ignore=ignore, allow_dict_kwargs=allow_dict_kwargs)
@ -63,11 +66,11 @@ def check_file(filename, ignore=None, allow_dict_kwargs=True):
return visitor.builtin_type_calls
def parse_ignore(value): # type: (str) -> Set[str]
def parse_ignore(value: str) -> Set[str]:
return set(value.split(','))
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*')
parser.add_argument('--ignore', type=parse_ignore, default=set())
@ -93,15 +96,11 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
rc = rc or 1
for call in calls:
print(
'{filename}:{call.line}:{call.column}: '
'replace {call.name}() with {replacement}'.format(
filename=filename,
call=call,
replacement=BUILTIN_TYPES[call.name],
),
f'{filename}:{call.line}:{call.column}: '
f'replace {call.name}() with {BUILTIN_TYPES[call.name]}',
)
return rc
if __name__ == '__main__':
sys.exit(main())
exit(main())

View File

@ -1,13 +1,9 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
from typing import Optional
from typing import Sequence
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*', help='Filenames to check')
args = parser.parse_args(argv)
@ -18,7 +14,7 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
with open(filename, 'rb') as f:
if f.read(3) == b'\xef\xbb\xbf':
retv = 1
print('{}: Has a byte-order marker'.format(filename))
print(f'{filename}: Has a byte-order marker')
return retv

View File

@ -1,7 +1,3 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
from typing import Iterable
from typing import Optional
@ -12,11 +8,11 @@ from pre_commit_hooks.util import added_files
from pre_commit_hooks.util import cmd_output
def lower_set(iterable): # type: (Iterable[str]) -> Set[str]
def lower_set(iterable: Iterable[str]) -> Set[str]:
return {x.lower() for x in iterable}
def find_conflicting_filenames(filenames): # type: (Sequence[str]) -> int
def find_conflicting_filenames(filenames: Sequence[str]) -> int:
repo_files = set(cmd_output('git', 'ls-files').splitlines())
relevant_files = set(filenames) | added_files()
repo_files -= relevant_files
@ -39,13 +35,13 @@ def find_conflicting_filenames(filenames): # type: (Sequence[str]) -> int
if x.lower() in conflicts
]
for filename in sorted(conflicting_files):
print('Case-insensitivity conflict found: {}'.format(filename))
print(f'Case-insensitivity conflict found: {filename}')
retv = 1
return retv
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument(
'filenames', nargs='*',

View File

@ -1,30 +1,17 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import io
import tokenize
from tokenize import tokenize as tokenize_tokenize
from typing import Optional
from typing import Sequence
import six
if six.PY2: # pragma: no cover (PY2)
from tokenize import generate_tokens as tokenize_tokenize
OTHER_NON_CODE = ()
else: # pragma: no cover (PY3)
from tokenize import tokenize as tokenize_tokenize
OTHER_NON_CODE = (tokenize.ENCODING,)
NON_CODE_TOKENS = frozenset(
(tokenize.COMMENT, tokenize.ENDMARKER, tokenize.NEWLINE, tokenize.NL) +
OTHER_NON_CODE,
)
NON_CODE_TOKENS = frozenset((
tokenize.COMMENT, tokenize.ENDMARKER, tokenize.NEWLINE, tokenize.NL,
tokenize.ENCODING,
))
def check_docstring_first(src, filename='<unknown>'):
# type: (bytes, str) -> int
def check_docstring_first(src: bytes, filename: str = '<unknown>') -> int:
"""Returns nonzero if the source has what looks like a docstring that is
not at the beginning of the source.
@ -40,18 +27,14 @@ def check_docstring_first(src, filename='<unknown>'):
if tok_type == tokenize.STRING and scol == 0:
if found_docstring_line is not None:
print(
'{}:{} Multiple module docstrings '
'(first docstring on line {}).'.format(
filename, sline, found_docstring_line,
),
f'{filename}:{sline} Multiple module docstrings '
f'(first docstring on line {found_docstring_line}).',
)
return 1
elif found_code_line is not None:
print(
'{}:{} Module docstring appears after code '
'(code seen on line {}).'.format(
filename, sline, found_code_line,
),
f'{filename}:{sline} Module docstring appears after code '
f'(code seen on line {found_code_line}).',
)
return 1
else:
@ -62,7 +45,7 @@ def check_docstring_first(src, filename='<unknown>'):
return 0
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*')
args = parser.parse_args(argv)

View File

@ -1,28 +1,22 @@
"""Check that executable text files have a shebang."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import pipes
import shlex
import sys
from typing import Optional
from typing import Sequence
def check_has_shebang(path): # type: (str) -> int
def check_has_shebang(path: str) -> int:
with open(path, 'rb') as f:
first_bytes = f.read(2)
if first_bytes != b'#!':
quoted = shlex.quote(path)
print(
'{path}: marked executable but has no (or invalid) shebang!\n'
" If it isn't supposed to be executable, try: chmod -x {quoted}\n"
' If it is supposed to be executable, double-check its shebang.'
.format(
path=path,
quoted=pipes.quote(path),
),
f'{path}: marked executable but has no (or invalid) shebang!\n'
f" If it isn't supposed to be executable, try: "
f'`chmod -x {quoted}`\n'
f' If it is supposed to be executable, double-check its shebang.',
file=sys.stderr,
)
return 1
@ -30,7 +24,7 @@ def check_has_shebang(path): # type: (str) -> int
return 0
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('filenames', nargs='*')
args = parser.parse_args(argv)

View File

@ -1,27 +1,25 @@
from __future__ import print_function
import argparse
import io
import json
import sys
from typing import Optional
from typing import Sequence
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*', help='Filenames to check.')
args = parser.parse_args(argv)
retval = 0
for filename in args.filenames:
try:
json.load(io.open(filename, encoding='UTF-8'))
except (ValueError, UnicodeDecodeError) as exc:
print('{}: Failed to json decode ({})'.format(filename, exc))
retval = 1
with open(filename, 'rb') as f:
try:
json.load(f)
# TODO: need UnicodeDecodeError?
except (ValueError, UnicodeDecodeError) as exc:
print(f'{filename}: Failed to json decode ({exc})')
retval = 1
return retval
if __name__ == '__main__':
sys.exit(main())
exit(main())

View File

@ -1,5 +1,3 @@
from __future__ import print_function
import argparse
import os.path
from typing import Optional
@ -12,10 +10,9 @@ CONFLICT_PATTERNS = [
b'=======\n',
b'>>>>>>> ',
]
WARNING_MSG = 'Merge conflict string "{0}" found in {1}:{2}'
def is_in_merge(): # type: () -> int
def is_in_merge() -> int:
return (
os.path.exists(os.path.join('.git', 'MERGE_MSG')) and
(
@ -26,7 +23,7 @@ def is_in_merge(): # type: () -> int
)
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*')
parser.add_argument('--assume-in-merge', action='store_true')
@ -42,9 +39,8 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
for pattern in CONFLICT_PATTERNS:
if line.startswith(pattern):
print(
WARNING_MSG.format(
pattern.decode(), filename, i + 1,
),
f'Merge conflict string "{pattern.decode()}" '
f'found in {filename}:{i + 1}',
)
retcode = 1

View File

@ -1,14 +1,10 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import os.path
from typing import Optional
from typing import Sequence
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser(description='Checks for broken symlinks.')
parser.add_argument('filenames', nargs='*', help='Filenames to check')
args = parser.parse_args(argv)
@ -20,7 +16,7 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
os.path.islink(filename) and
not os.path.exists(filename)
): # pragma: no cover (symlink support required)
print('{}: Broken symlink'.format(filename))
print(f'{filename}: Broken symlink')
retv = 1
return retv

View File

@ -1,14 +1,11 @@
from __future__ import print_function
import argparse
import sys
from typing import Optional
from typing import Sequence
import toml
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*', help='Filenames to check.')
args = parser.parse_args(argv)
@ -19,10 +16,10 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
with open(filename) as f:
toml.load(f)
except toml.TomlDecodeError as exc:
print('{}: {}'.format(filename, exc))
print(f'{filename}: {exc}')
retval = 1
return retval
if __name__ == '__main__':
sys.exit(main())
exit(main())

View File

@ -1,7 +1,3 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import re
import sys
@ -14,19 +10,19 @@ GITHUB_NON_PERMALINK = re.compile(
)
def _check_filename(filename): # type: (str) -> int
def _check_filename(filename: str) -> int:
retv = 0
with open(filename, 'rb') as f:
for i, line in enumerate(f, 1):
if GITHUB_NON_PERMALINK.search(line):
sys.stdout.write('{}:{}:'.format(filename, i))
sys.stdout.write(f'{filename}:{i}:')
sys.stdout.flush()
getattr(sys.stdout, 'buffer', sys.stdout).write(line)
sys.stdout.buffer.write(line)
retv = 1
return retv
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*')
args = parser.parse_args(argv)

View File

@ -1,30 +1,26 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import io
import sys
import xml.sax.handler
from typing import Optional
from typing import Sequence
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*', help='XML filenames to check.')
args = parser.parse_args(argv)
retval = 0
handler = xml.sax.handler.ContentHandler()
for filename in args.filenames:
try:
with io.open(filename, 'rb') as xml_file:
xml.sax.parse(xml_file, xml.sax.handler.ContentHandler())
with open(filename, 'rb') as xml_file:
# https://github.com/python/typeshed/pull/3725
xml.sax.parse(xml_file, handler) # type: ignore
except xml.sax.SAXException as exc:
print('{}: Failed to xml parse ({})'.format(filename, exc))
print(f'{filename}: Failed to xml parse ({exc})')
retval = 1
return retval
if __name__ == '__main__':
sys.exit(main())
exit(main())

View File

@ -1,11 +1,7 @@
from __future__ import print_function
import argparse
import collections
import io
import sys
from typing import Any
from typing import Generator
from typing import NamedTuple
from typing import Optional
from typing import Sequence
@ -14,20 +10,24 @@ import ruamel.yaml
yaml = ruamel.yaml.YAML(typ='safe')
def _exhaust(gen): # type: (Generator[str, None, None]) -> None
def _exhaust(gen: Generator[str, None, None]) -> None:
for _ in gen:
pass
def _parse_unsafe(*args, **kwargs): # type: (*Any, **Any) -> None
def _parse_unsafe(*args: Any, **kwargs: Any) -> None:
_exhaust(yaml.parse(*args, **kwargs))
def _load_all(*args, **kwargs): # type: (*Any, **Any) -> None
def _load_all(*args: Any, **kwargs: Any) -> None:
_exhaust(yaml.load_all(*args, **kwargs))
Key = collections.namedtuple('Key', ('multi', 'unsafe'))
class Key(NamedTuple):
multi: bool
unsafe: bool
LOAD_FNS = {
Key(multi=False, unsafe=False): yaml.load,
Key(multi=False, unsafe=True): _parse_unsafe,
@ -36,7 +36,7 @@ LOAD_FNS = {
}
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument(
'-m', '--multi', '--allow-multiple-documents', action='store_true',
@ -59,7 +59,7 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
retval = 0
for filename in args.filenames:
try:
with io.open(filename, encoding='UTF-8') as f:
with open(filename, encoding='UTF-8') as f:
load_fn(f)
except ruamel.yaml.YAMLError as exc:
print(exc)
@ -68,4 +68,4 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
if __name__ == '__main__':
sys.exit(main())
exit(main())

View File

@ -1,35 +1,38 @@
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import ast
import collections
import traceback
from typing import List
from typing import NamedTuple
from typing import Optional
from typing import Sequence
DEBUG_STATEMENTS = {'pdb', 'ipdb', 'pudb', 'q', 'rdb', 'rpdb'}
Debug = collections.namedtuple('Debug', ('line', 'col', 'name', 'reason'))
class Debug(NamedTuple):
line: int
col: int
name: str
reason: str
class DebugStatementParser(ast.NodeVisitor):
def __init__(self): # type: () -> None
self.breakpoints = [] # type: List[Debug]
def __init__(self) -> None:
self.breakpoints: List[Debug] = []
def visit_Import(self, node): # type: (ast.Import) -> None
def visit_Import(self, node: ast.Import) -> None:
for name in node.names:
if name.name in DEBUG_STATEMENTS:
st = Debug(node.lineno, node.col_offset, name.name, 'imported')
self.breakpoints.append(st)
def visit_ImportFrom(self, node): # type: (ast.ImportFrom) -> None
def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
if node.module in DEBUG_STATEMENTS:
st = Debug(node.lineno, node.col_offset, node.module, 'imported')
self.breakpoints.append(st)
def visit_Call(self, node): # type: (ast.Call) -> None
def visit_Call(self, node: ast.Call) -> None:
"""python3.7+ breakpoint()"""
if isinstance(node.func, ast.Name) and node.func.id == 'breakpoint':
st = Debug(node.lineno, node.col_offset, node.func.id, 'called')
@ -37,12 +40,12 @@ class DebugStatementParser(ast.NodeVisitor):
self.generic_visit(node)
def check_file(filename): # type: (str) -> int
def check_file(filename: str) -> int:
try:
with open(filename, 'rb') as f:
ast_obj = ast.parse(f.read(), filename=filename)
except SyntaxError:
print('{} - Could not parse ast'.format(filename))
print(f'{filename} - Could not parse ast')
print()
print('\t' + traceback.format_exc().replace('\n', '\n\t'))
print()
@ -52,16 +55,12 @@ def check_file(filename): # type: (str) -> int
visitor.visit(ast_obj)
for bp in visitor.breakpoints:
print(
'{}:{}:{} - {} {}'.format(
filename, bp.line, bp.col, bp.name, bp.reason,
),
)
print(f'{filename}:{bp.line}:{bp.col} - {bp.name} {bp.reason}')
return int(bool(visitor.breakpoints))
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*', help='Filenames to run')
args = parser.parse_args(argv)

View File

@ -1,18 +1,19 @@
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import configparser
import os
from typing import Dict
from typing import List
from typing import NamedTuple
from typing import Optional
from typing import Sequence
from typing import Set
from six.moves import configparser
class BadFile(NamedTuple):
filename: str
key: str
def get_aws_cred_files_from_env(): # type: () -> Set[str]
def get_aws_cred_files_from_env() -> Set[str]:
"""Extract credential file paths from environment variables."""
return {
os.environ[env_var]
@ -24,7 +25,7 @@ def get_aws_cred_files_from_env(): # type: () -> Set[str]
}
def get_aws_secrets_from_env(): # type: () -> Set[str]
def get_aws_secrets_from_env() -> Set[str]:
"""Extract AWS secrets from environment variables."""
keys = set()
for env_var in (
@ -35,7 +36,7 @@ def get_aws_secrets_from_env(): # type: () -> Set[str]
return keys
def get_aws_secrets_from_file(credentials_file): # type: (str) -> Set[str]
def get_aws_secrets_from_file(credentials_file: str) -> Set[str]:
"""Extract AWS secrets from configuration files.
Read an ini-style configuration file and return a set with all found AWS
@ -66,8 +67,10 @@ def get_aws_secrets_from_file(credentials_file): # type: (str) -> Set[str]
return keys
def check_file_for_aws_keys(filenames, keys):
# type: (Sequence[str], Set[str]) -> List[Dict[str, str]]
def check_file_for_aws_keys(
filenames: Sequence[str],
keys: Set[str],
) -> List[BadFile]:
"""Check if files contain AWS secrets.
Return a list of all files containing AWS secrets and keys found, with all
@ -82,13 +85,11 @@ def check_file_for_aws_keys(filenames, keys):
# naively match the entire file, low chance of incorrect
# collision
if key in text_body:
bad_files.append({
'filename': filename, 'key': key[:4] + '*' * 28,
})
bad_files.append(BadFile(filename, key[:4].ljust(28, '*')))
return bad_files
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='+', help='Filenames to run')
parser.add_argument(
@ -117,7 +118,7 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
# of files to to gather AWS secrets from.
credential_files |= get_aws_cred_files_from_env()
keys = set() # type: Set[str]
keys: Set[str] = set()
for credential_file in credential_files:
keys |= get_aws_secrets_from_file(credential_file)
@ -139,7 +140,7 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
bad_filenames = check_file_for_aws_keys(args.filenames, keys)
if bad_filenames:
for bad_file in bad_filenames:
print('AWS secret found in {filename}: {key}'.format(**bad_file))
print(f'AWS secret found in {bad_file.filename}: {bad_file.key}')
return 1
else:
return 0

View File

@ -1,7 +1,4 @@
from __future__ import print_function
import argparse
import sys
from typing import Optional
from typing import Sequence
@ -17,7 +14,7 @@ BLACKLIST = [
]
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*', help='Filenames to check')
args = parser.parse_args(argv)
@ -32,11 +29,11 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
if private_key_files:
for private_key_file in private_key_files:
print('Private key found: {}'.format(private_key_file))
print(f'Private key found: {private_key_file}')
return 1
else:
return 0
if __name__ == '__main__':
sys.exit(main())
exit(main())

View File

@ -1,20 +1,16 @@
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import os
import sys
from typing import IO
from typing import Optional
from typing import Sequence
def fix_file(file_obj): # type: (IO[bytes]) -> int
def fix_file(file_obj: IO[bytes]) -> int:
# Test for newline at end of file
# Empty files will throw IOError here
try:
file_obj.seek(-1, os.SEEK_END)
except IOError:
except OSError:
return 0
last_character = file_obj.read(1)
# last_character will be '' for an empty file
@ -52,7 +48,7 @@ def fix_file(file_obj): # type: (IO[bytes]) -> int
return 0
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*', help='Filenames to fix')
args = parser.parse_args(argv)
@ -64,11 +60,11 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
with open(filename, 'rb+') as file_obj:
ret_for_file = fix_file(file_obj)
if ret_for_file:
print('Fixing {}'.format(filename))
print(f'Fixing {filename}')
retv |= ret_for_file
return retv
if __name__ == '__main__':
sys.exit(main())
exit(main())

View File

@ -9,10 +9,7 @@ per line. Various users are adding/removing lines from this file; using
this hook on that file should reduce the instances of git merge
conflicts and keep the file nicely ordered.
"""
from __future__ import print_function
import argparse
import sys
from typing import IO
from typing import Optional
from typing import Sequence
@ -21,7 +18,7 @@ PASS = 0
FAIL = 1
def sort_file_contents(f): # type: (IO[bytes]) -> int
def sort_file_contents(f: IO[bytes]) -> int:
before = list(f)
after = sorted([line.strip(b'\n\r') for line in before if line.strip()])
@ -37,7 +34,7 @@ def sort_file_contents(f): # type: (IO[bytes]) -> int
return FAIL
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='+', help='Files to sort')
args = parser.parse_args(argv)
@ -49,7 +46,7 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
ret_for_file = sort_file_contents(file_obj)
if ret_for_file:
print('Sorting {}'.format(arg))
print(f'Sorting {arg}')
retv |= ret_for_file
@ -57,4 +54,4 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
if __name__ == '__main__':
sys.exit(main())
exit(main())

View File

@ -1,18 +1,13 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import collections
from typing import IO
from typing import NamedTuple
from typing import Optional
from typing import Sequence
from typing import Union
DEFAULT_PRAGMA = b'# -*- coding: utf-8 -*-'
def has_coding(line): # type: (bytes) -> bool
def has_coding(line: bytes) -> bool:
if not line.strip():
return False
return (
@ -25,30 +20,30 @@ def has_coding(line): # type: (bytes) -> bool
)
class ExpectedContents(
collections.namedtuple(
'ExpectedContents', ('shebang', 'rest', 'pragma_status', 'ending'),
),
):
"""
pragma_status:
- True: has exactly the coding pragma expected
- False: missing coding pragma entirely
- None: has a coding pragma, but it does not match
"""
__slots__ = ()
class ExpectedContents(NamedTuple):
shebang: bytes
rest: bytes
# True: has exactly the coding pragma expected
# False: missing coding pragma entirely
# None: has a coding pragma, but it does not match
pragma_status: Optional[bool]
ending: bytes
@property
def has_any_pragma(self): # type: () -> bool
def has_any_pragma(self) -> bool:
return self.pragma_status is not False
def is_expected_pragma(self, remove): # type: (bool) -> bool
def is_expected_pragma(self, remove: bool) -> bool:
expected_pragma_status = not remove
return self.pragma_status is expected_pragma_status
def _get_expected_contents(first_line, second_line, rest, expected_pragma):
# type: (bytes, bytes, bytes, bytes) -> ExpectedContents
def _get_expected_contents(
first_line: bytes,
second_line: bytes,
rest: bytes,
expected_pragma: bytes,
) -> ExpectedContents:
ending = b'\r\n' if first_line.endswith(b'\r\n') else b'\n'
if first_line.startswith(b'#!'):
@ -60,7 +55,7 @@ def _get_expected_contents(first_line, second_line, rest, expected_pragma):
rest = second_line + rest
if potential_coding.rstrip(b'\r\n') == expected_pragma:
pragma_status = True # type: Optional[bool]
pragma_status: Optional[bool] = True
elif has_coding(potential_coding):
pragma_status = None
else:
@ -72,8 +67,11 @@ def _get_expected_contents(first_line, second_line, rest, expected_pragma):
)
def fix_encoding_pragma(f, remove=False, expected_pragma=DEFAULT_PRAGMA):
# type: (IO[bytes], bool, bytes) -> int
def fix_encoding_pragma(
f: IO[bytes],
remove: bool = False,
expected_pragma: bytes = DEFAULT_PRAGMA,
) -> int:
expected = _get_expected_contents(
f.readline(), f.readline(), f.read(), expected_pragma,
)
@ -103,21 +101,20 @@ def fix_encoding_pragma(f, remove=False, expected_pragma=DEFAULT_PRAGMA):
return 1
def _normalize_pragma(pragma): # type: (Union[bytes, str]) -> bytes
if not isinstance(pragma, bytes):
pragma = pragma.encode('UTF-8')
return pragma.rstrip()
def _normalize_pragma(pragma: str) -> bytes:
return pragma.encode().rstrip()
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser(
'Fixes the encoding pragma of python files',
)
parser.add_argument('filenames', nargs='*', help='Filenames to fix')
parser.add_argument(
'--pragma', default=DEFAULT_PRAGMA, type=_normalize_pragma,
help='The encoding pragma to use. Default: {}'.format(
DEFAULT_PRAGMA.decode(),
help=(
f'The encoding pragma to use. '
f'Default: {DEFAULT_PRAGMA.decode()}'
),
)
parser.add_argument(
@ -141,9 +138,7 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
retv |= file_ret
if file_ret:
print(
fmt.format(
pragma=args.pragma.decode(), filename=filename,
),
fmt.format(pragma=args.pragma.decode(), filename=filename),
)
return retv

View File

@ -1,14 +1,10 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from typing import Optional
from typing import Sequence
from pre_commit_hooks.util import cmd_output
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
# `argv` is ignored, pre-commit will send us a list of files that we
# don't care about
added_diff = cmd_output(
@ -19,7 +15,7 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
metadata, filename = line.split('\t', 1)
new_mode = metadata.split(' ')[1]
if new_mode == '160000':
print('{}: new submodule introduced'.format(filename))
print(f'{filename}: new submodule introduced')
retv = 1
if retv:

View File

@ -1,7 +1,3 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import collections
from typing import Dict
@ -17,7 +13,7 @@ ALL_ENDINGS = (CR, CRLF, LF)
FIX_TO_LINE_ENDING = {'cr': CR, 'crlf': CRLF, 'lf': LF}
def _fix(filename, contents, ending): # type: (str, bytes, bytes) -> None
def _fix(filename: str, contents: bytes, ending: bytes) -> None:
new_contents = b''.join(
line.rstrip(b'\r\n') + ending for line in contents.splitlines(True)
)
@ -25,11 +21,11 @@ def _fix(filename, contents, ending): # type: (str, bytes, bytes) -> None
f.write(new_contents)
def fix_filename(filename, fix): # type: (str, str) -> int
def fix_filename(filename: str, fix: str) -> int:
with open(filename, 'rb') as f:
contents = f.read()
counts = collections.defaultdict(int) # type: Dict[bytes, int]
counts: Dict[bytes, int] = collections.defaultdict(int)
for line in contents.splitlines(True):
for ending in ALL_ENDINGS:
@ -66,7 +62,7 @@ def fix_filename(filename, fix): # type: (str, str) -> int
return other_endings
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument(
'-f', '--fix',
@ -81,9 +77,9 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
for filename in args.filenames:
if fix_filename(filename, args.fix):
if args.fix == 'no':
print('{}: mixed line endings'.format(filename))
print(f'{filename}: mixed line endings')
else:
print('{}: fixed mixed line endings'.format(filename))
print(f'{filename}: fixed mixed line endings')
retv = 1
return retv

View File

@ -1,5 +1,3 @@
from __future__ import print_function
import argparse
import re
from typing import AbstractSet
@ -10,8 +8,10 @@ from pre_commit_hooks.util import CalledProcessError
from pre_commit_hooks.util import cmd_output
def is_on_branch(protected, patterns=frozenset()):
# type: (AbstractSet[str], AbstractSet[str]) -> bool
def is_on_branch(
protected: AbstractSet[str],
patterns: AbstractSet[str] = frozenset(),
) -> bool:
try:
ref_name = cmd_output('git', 'symbolic-ref', 'HEAD')
except CalledProcessError:
@ -23,7 +23,7 @@ def is_on_branch(protected, patterns=frozenset()):
)
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument(
'-b', '--branch', action='append',

View File

@ -1,10 +1,5 @@
from __future__ import print_function
import argparse
import io
import json
import sys
from collections import OrderedDict
from difflib import unified_diff
from typing import List
from typing import Mapping
@ -13,38 +8,36 @@ from typing import Sequence
from typing import Tuple
from typing import Union
from six import text_type
def _get_pretty_format(
contents, indent, ensure_ascii=True, sort_keys=True, top_keys=(),
): # type: (str, str, bool, bool, Sequence[str]) -> str
def pairs_first(pairs):
# type: (Sequence[Tuple[str, str]]) -> Mapping[str, str]
contents: str,
indent: str,
ensure_ascii: bool = True,
sort_keys: bool = True,
top_keys: Sequence[str] = (),
) -> str:
def pairs_first(pairs: Sequence[Tuple[str, str]]) -> Mapping[str, str]:
before = [pair for pair in pairs if pair[0] in top_keys]
before = sorted(before, key=lambda x: top_keys.index(x[0]))
after = [pair for pair in pairs if pair[0] not in top_keys]
if sort_keys:
after = sorted(after, key=lambda x: x[0])
return OrderedDict(before + after)
after.sort()
return dict(before + after)
json_pretty = json.dumps(
json.loads(contents, object_pairs_hook=pairs_first),
indent=indent,
ensure_ascii=ensure_ascii,
# Workaround for https://bugs.python.org/issue16333
separators=(',', ': '),
)
# Ensure unicode (Py2) and add the newline that dumps does not end with.
return text_type(json_pretty) + '\n'
return f'{json_pretty}\n'
def _autofix(filename, new_contents): # type: (str, str) -> None
print('Fixing file {}'.format(filename))
with io.open(filename, 'w', encoding='UTF-8') as f:
def _autofix(filename: str, new_contents: str) -> None:
print(f'Fixing file {filename}')
with open(filename, 'w', encoding='UTF-8') as f:
f.write(new_contents)
def parse_num_to_int(s): # type: (str) -> Union[int, str]
def parse_num_to_int(s: str) -> Union[int, str]:
"""Convert string numbers to int, leaving strings as is."""
try:
return int(s)
@ -52,18 +45,18 @@ def parse_num_to_int(s): # type: (str) -> Union[int, str]
return s
def parse_topkeys(s): # type: (str) -> List[str]
def parse_topkeys(s: str) -> List[str]:
return s.split(',')
def get_diff(source, target, file): # type: (str, str, str) -> str
def get_diff(source: str, target: str, file: str) -> str:
source_lines = source.splitlines(True)
target_lines = target.splitlines(True)
diff = unified_diff(source_lines, target_lines, fromfile=file, tofile=file)
return ''.join(diff)
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument(
'--autofix',
@ -110,7 +103,7 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
status = 0
for json_file in args.filenames:
with io.open(json_file, encoding='UTF-8') as f:
with open(json_file, encoding='UTF-8') as f:
contents = f.read()
try:
@ -131,8 +124,8 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
status = 1
except ValueError:
print(
'Input File {} is not a valid JSON, consider using check-json'
.format(json_file),
f'Input File {json_file} is not a valid JSON, consider using '
f'check-json',
)
return 1
@ -140,4 +133,4 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
if __name__ == '__main__':
sys.exit(main())
exit(main())

View File

@ -1,5 +1,3 @@
from __future__ import print_function
import argparse
from typing import IO
from typing import List
@ -11,15 +9,13 @@ PASS = 0
FAIL = 1
class Requirement(object):
def __init__(self): # type: () -> None
super(Requirement, self).__init__()
self.value = None # type: Optional[bytes]
self.comments = [] # type: List[bytes]
class Requirement:
def __init__(self) -> None:
self.value: Optional[bytes] = None
self.comments: List[bytes] = []
@property
def name(self): # type: () -> bytes
def name(self) -> bytes:
assert self.value is not None, self.value
for egg in (b'#egg=', b'&egg='):
if egg in self.value:
@ -27,7 +23,7 @@ class Requirement(object):
return self.value.lower().partition(b'==')[0]
def __lt__(self, requirement): # type: (Requirement) -> int
def __lt__(self, requirement: 'Requirement') -> int:
# \n means top of file comment, so always return True,
# otherwise just do a string comparison with value.
assert self.value is not None, self.value
@ -39,10 +35,10 @@ class Requirement(object):
return self.name < requirement.name
def fix_requirements(f): # type: (IO[bytes]) -> int
requirements = [] # type: List[Requirement]
def fix_requirements(f: IO[bytes]) -> int:
requirements: List[Requirement] = []
before = list(f)
after = [] # type: List[bytes]
after: List[bytes] = []
before_string = b''.join(before)
@ -109,7 +105,7 @@ def fix_requirements(f): # type: (IO[bytes]) -> int
return FAIL
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*', help='Filenames to fix')
args = parser.parse_args(argv)
@ -121,7 +117,7 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
ret_for_file = fix_requirements(file_obj)
if ret_for_file:
print('Sorting {}'.format(arg))
print(f'Sorting {arg}')
retv |= ret_for_file

View File

@ -18,8 +18,6 @@ We assume a strict subset of YAML that looks like:
In other words, we don't sort deeper than the top layer, and might corrupt
complicated YAML files.
"""
from __future__ import print_function
import argparse
from typing import List
from typing import Optional
@ -29,7 +27,7 @@ from typing import Sequence
QUOTES = ["'", '"']
def sort(lines): # type: (List[str]) -> List[str]
def sort(lines: List[str]) -> List[str]:
"""Sort a YAML file in alphabetical order, keeping blocks together.
:param lines: array of strings (without newlines)
@ -47,7 +45,7 @@ def sort(lines): # type: (List[str]) -> List[str]
return new_lines
def parse_block(lines, header=False): # type: (List[str], bool) -> List[str]
def parse_block(lines: List[str], header: bool = False) -> List[str]:
"""Parse and return a single block, popping off the start of `lines`.
If parsing a header block, we stop after we reach a line that is not a
@ -63,7 +61,7 @@ def parse_block(lines, header=False): # type: (List[str], bool) -> List[str]
return block_lines
def parse_blocks(lines): # type: (List[str]) -> List[List[str]]
def parse_blocks(lines: List[str]) -> List[List[str]]:
"""Parse and return all possible blocks, popping off the start of `lines`.
:param lines: list of lines
@ -80,7 +78,7 @@ def parse_blocks(lines): # type: (List[str]) -> List[List[str]]
return blocks
def first_key(lines): # type: (List[str]) -> str
def first_key(lines: List[str]) -> str:
"""Returns a string representing the sort key of a block.
The sort key is the first YAML key we encounter, ignoring comments, and
@ -102,7 +100,7 @@ def first_key(lines): # type: (List[str]) -> str
return '' # not actually reached in reality
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*', help='Filenames to fix')
args = parser.parse_args(argv)
@ -115,7 +113,7 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
new_lines = sort(lines)
if lines != new_lines:
print('Fixing file `{filename}`'.format(filename=filename))
print(f'Fixing file `{filename}`')
f.seek(0)
f.write('\n'.join(new_lines) + '\n')
f.truncate()

View File

@ -1,7 +1,3 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import io
import re
@ -13,7 +9,7 @@ from typing import Sequence
START_QUOTE_RE = re.compile('^[a-zA-Z]*"')
def handle_match(token_text): # type: (str) -> str
def handle_match(token_text: str) -> str:
if '"""' in token_text or "'''" in token_text:
return token_text
@ -28,7 +24,7 @@ def handle_match(token_text): # type: (str) -> str
return token_text
def get_line_offsets_by_line_no(src): # type: (str) -> List[int]
def get_line_offsets_by_line_no(src: str) -> List[int]:
# Padded so we can index with line number
offsets = [-1, 0]
for line in src.splitlines(True):
@ -36,8 +32,8 @@ def get_line_offsets_by_line_no(src): # type: (str) -> List[int]
return offsets
def fix_strings(filename): # type: (str) -> int
with io.open(filename, encoding='UTF-8', newline='') as f:
def fix_strings(filename: str) -> int:
with open(filename, encoding='UTF-8', newline='') as f:
contents = f.read()
line_offsets = get_line_offsets_by_line_no(contents)
@ -57,14 +53,14 @@ def fix_strings(filename): # type: (str) -> int
new_contents = ''.join(splitcontents)
if contents != new_contents:
with io.open(filename, 'w', encoding='UTF-8', newline='') as f:
with open(filename, 'w', encoding='UTF-8', newline='') as f:
f.write(new_contents)
return 1
else:
return 0
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*', help='Filenames to fix')
args = parser.parse_args(argv)
@ -74,7 +70,7 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
for filename in args.filenames:
return_value = fix_strings(filename)
if return_value != 0:
print('Fixing strings in {}'.format(filename))
print(f'Fixing strings in {filename}')
retv |= return_value
return retv

View File

@ -1,14 +1,11 @@
from __future__ import print_function
import argparse
import os.path
import re
import sys
from typing import Optional
from typing import Sequence
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*')
parser.add_argument(
@ -27,14 +24,10 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
not base == 'conftest.py'
):
retcode = 1
print(
'{} does not match pattern "{}"'.format(
filename, test_name_pattern,
),
)
print(f'{filename} does not match pattern "{test_name_pattern}"')
return retcode
if __name__ == '__main__':
sys.exit(main())
exit(main())

View File

@ -1,14 +1,14 @@
from __future__ import print_function
import argparse
import os
import sys
from typing import Optional
from typing import Sequence
def _fix_file(filename, is_markdown, chars):
# type: (str, bool, Optional[bytes]) -> bool
def _fix_file(
filename: str,
is_markdown: bool,
chars: Optional[bytes],
) -> bool:
with open(filename, mode='rb') as file_processed:
lines = file_processed.readlines()
newlines = [_process_line(line, is_markdown, chars) for line in lines]
@ -21,8 +21,11 @@ def _fix_file(filename, is_markdown, chars):
return False
def _process_line(line, is_markdown, chars):
# type: (bytes, bool, Optional[bytes]) -> bytes
def _process_line(
line: bytes,
is_markdown: bool,
chars: Optional[bytes],
) -> bytes:
if line[-2:] == b'\r\n':
eol = b'\r\n'
line = line[:-2]
@ -37,7 +40,7 @@ def _process_line(line, is_markdown, chars):
return line.rstrip(chars) + eol
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument(
'--no-markdown-linebreak-ext',
@ -80,20 +83,20 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
for ext in md_exts:
if any(c in ext[1:] for c in r'./\:'):
parser.error(
'bad --markdown-linebreak-ext extension {!r} (has . / \\ :)\n'
" (probably filename; use '--markdown-linebreak-ext=EXT')"
.format(ext),
f'bad --markdown-linebreak-ext extension '
f'{ext!r} (has . / \\ :)\n'
f" (probably filename; use '--markdown-linebreak-ext=EXT')",
)
chars = None if args.chars is None else args.chars.encode('utf-8')
chars = None if args.chars is None else args.chars.encode()
return_code = 0
for filename in args.filenames:
_, extension = os.path.splitext(filename.lower())
md = all_markdown or extension in md_exts
if _fix_file(filename, md, chars):
print('Fixing {}'.format(filename))
print(f'Fixing {filename}')
return_code = 1
return return_code
if __name__ == '__main__':
sys.exit(main())
exit(main())

View File

@ -1,9 +1,6 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import subprocess
from typing import Any
from typing import Optional
from typing import Set
@ -11,18 +8,17 @@ class CalledProcessError(RuntimeError):
pass
def added_files(): # type: () -> Set[str]
def added_files() -> Set[str]:
cmd = ('git', 'diff', '--staged', '--name-only', '--diff-filter=A')
return set(cmd_output(*cmd).splitlines())
def cmd_output(*cmd, **kwargs): # type: (*str, **Any) -> str
retcode = kwargs.pop('retcode', 0)
def cmd_output(*cmd: str, retcode: Optional[int] = 0, **kwargs: Any) -> str:
kwargs.setdefault('stdout', subprocess.PIPE)
kwargs.setdefault('stderr', subprocess.PIPE)
proc = subprocess.Popen(cmd, **kwargs)
stdout, stderr = proc.communicate()
stdout = stdout.decode('UTF-8')
stdout = stdout.decode()
if retcode is not None and proc.returncode != retcode:
raise CalledProcessError(cmd, retcode, proc.returncode, stdout, stderr)
return stdout

View File

@ -11,13 +11,11 @@ license = MIT
license_file = LICENSE
classifiers =
License :: OSI Approved :: MIT License
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: Implementation :: CPython
Programming Language :: Python :: Implementation :: PyPy
@ -27,9 +25,7 @@ install_requires =
flake8
ruamel.yaml>=0.15
toml
six
typing; python_version<"3.5"
python_requires = >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*
python_requires = >=3.6
[options.entry_points]
console_scripts =

View File

@ -1,6 +1,3 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import os.path

View File

@ -1,6 +1,3 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from pre_commit_hooks.autopep8_wrapper import main

View File

@ -1,6 +1,3 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import distutils.spawn
import pytest
@ -78,7 +75,7 @@ xfailif_no_gitlfs = pytest.mark.xfail(
@xfailif_no_gitlfs
def test_allows_gitlfs(temp_git_dir, monkeypatch): # pragma: no cover
with temp_git_dir.as_cwd():
monkeypatch.setenv(str('HOME'), str(temp_git_dir.strpath))
monkeypatch.setenv('HOME', str(temp_git_dir.strpath))
cmd_output('git', 'lfs', 'install')
temp_git_dir.join('f.py').write('a' * 10000)
cmd_output('git', 'lfs', 'track', 'f.py')
@ -90,7 +87,7 @@ def test_allows_gitlfs(temp_git_dir, monkeypatch): # pragma: no cover
@xfailif_no_gitlfs
def test_moves_with_gitlfs(temp_git_dir, monkeypatch): # pragma: no cover
with temp_git_dir.as_cwd():
monkeypatch.setenv(str('HOME'), str(temp_git_dir.strpath))
monkeypatch.setenv('HOME', str(temp_git_dir.strpath))
cmd_output('git', 'lfs', 'install')
cmd_output('git', 'lfs', 'track', 'a.bin', 'b.bin')
# First add the file we're going to move

View File

@ -1,6 +1,3 @@
from __future__ import absolute_import
from __future__ import unicode_literals
from pre_commit_hooks.check_ast import main
from testing.util import get_resource_path

View File

@ -7,7 +7,7 @@ from pre_commit_hooks.check_builtin_literals import main
from pre_commit_hooks.check_builtin_literals import Visitor
BUILTIN_CONSTRUCTORS = '''\
from six.moves import builtins
import builtins
c1 = complex()
d1 = dict()

View File

@ -1,6 +1,3 @@
from __future__ import absolute_import
from __future__ import unicode_literals
from pre_commit_hooks import check_byte_order_marker

View File

@ -1,6 +1,3 @@
from __future__ import absolute_import
from __future__ import unicode_literals
from pre_commit_hooks.check_case_conflict import find_conflicting_filenames
from pre_commit_hooks.check_case_conflict import main
from pre_commit_hooks.util import cmd_output

View File

@ -1,7 +1,3 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from pre_commit_hooks.check_docstring_first import check_docstring_first

View File

@ -1,7 +1,3 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from pre_commit_hooks.check_executables_have_shebangs import main
@ -12,7 +8,7 @@ from pre_commit_hooks.check_executables_have_shebangs import main
b'#!/bin/bash\nhello world\n',
b'#!/usr/bin/env python3.6',
b'#!python',
'#!☃'.encode('UTF-8'),
'#!☃'.encode(),
),
)
def test_has_shebang(content, tmpdir):
@ -27,7 +23,7 @@ def test_has_shebang(content, tmpdir):
b' #!python\n',
b'\n#!python\n',
b'python\n',
''.encode('UTF-8'),
''.encode(),
),
)
@ -36,4 +32,4 @@ def test_bad_shebang(content, tmpdir, capsys):
path.write(content, 'wb')
assert main((path.strpath,)) == 1
_, stderr = capsys.readouterr()
assert stderr.startswith('{}: marked executable but'.format(path.strpath))
assert stderr.startswith(f'{path}: marked executable but')

View File

@ -1,6 +1,3 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import shutil

View File

@ -1,6 +1,3 @@
from __future__ import absolute_import
from __future__ import unicode_literals
from pre_commit_hooks.check_toml import main

View File

@ -1,6 +1,3 @@
from __future__ import absolute_import
from __future__ import unicode_literals
from pre_commit_hooks.check_vcs_permalinks import main

View File

@ -1,6 +1,3 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from pre_commit_hooks.check_yaml import main

View File

@ -1,7 +1,3 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import pytest
from pre_commit_hooks.util import cmd_output

View File

@ -1,7 +1,3 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import ast
from pre_commit_hooks.debug_statement_hook import Debug

View File

@ -1,6 +1,3 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import io
import pytest
@ -129,9 +126,6 @@ def test_not_ok_input_alternate_pragma():
@pytest.mark.parametrize(
('input_s', 'expected'),
(
# Python 2 cli parameters are bytes
(b'# coding: utf-8', b'# coding: utf-8'),
# Python 3 cli parameters are text
('# coding: utf-8', b'# coding: utf-8'),
# trailing whitespace
('# coding: utf-8\n', b'# coding: utf-8'),
@ -149,7 +143,7 @@ def test_integration_alternate_pragma(tmpdir, capsys):
assert main((f.strpath, '--pragma', pragma)) == 1
assert f.read() == '# coding: utf-8\nx = 1\n'
out, _ = capsys.readouterr()
assert out == 'Added `# coding: utf-8` to {}\n'.format(f.strpath)
assert out == f'Added `# coding: utf-8` to {f.strpath}\n'
def test_crlf_ok(tmpdir):

View File

@ -1,5 +1,3 @@
from __future__ import absolute_import
import subprocess
import pytest

View File

@ -1,6 +1,3 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from pre_commit_hooks.mixed_line_ending import main
@ -86,7 +83,7 @@ def test_no_fix_does_not_modify(tmpdir, capsys):
assert ret == 1
assert path.read_binary() == contents
out, _ = capsys.readouterr()
assert out == '{}: mixed line endings\n'.format(path)
assert out == f'{path}: mixed line endings\n'
def test_fix_lf(tmpdir, capsys):
@ -97,7 +94,7 @@ def test_fix_lf(tmpdir, capsys):
assert ret == 1
assert path.read_binary() == b'foo\nbar\nbaz\n'
out, _ = capsys.readouterr()
assert out == '{}: fixed mixed line endings\n'.format(path)
assert out == f'{path}: fixed mixed line endings\n'
def test_fix_crlf(tmpdir):

View File

@ -1,6 +1,3 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from pre_commit_hooks.no_commit_to_branch import is_on_branch

View File

@ -2,7 +2,6 @@ import os
import shutil
import pytest
from six import PY2
from pre_commit_hooks.pretty_format_json import main
from pre_commit_hooks.pretty_format_json import parse_num_to_int
@ -42,7 +41,6 @@ def test_unsorted_main(filename, expected_retval):
assert ret == expected_retval
@pytest.mark.skipif(PY2, reason='Requires Python3')
@pytest.mark.parametrize(
('filename', 'expected_retval'), (
('not_pretty_formatted_json.json', 1),
@ -52,7 +50,7 @@ def test_unsorted_main(filename, expected_retval):
('tab_pretty_formatted_json.json', 0),
),
)
def test_tab_main(filename, expected_retval): # pragma: no cover
def test_tab_main(filename, expected_retval):
ret = main(['--indent', '\t', get_resource_path(filename)])
assert ret == expected_retval
@ -113,9 +111,9 @@ def test_diffing_output(capsys):
expected_retval = 1
a = os.path.join('a', resource_path)
b = os.path.join('b', resource_path)
expected_out = '''\
--- {}
+++ {}
expected_out = f'''\
--- {a}
+++ {b}
@@ -1,6 +1,9 @@
{{
- "foo":
@ -130,7 +128,7 @@ def test_diffing_output(capsys):
+ "blah": null,
+ "foo": "bar"
}}
'''.format(a, b)
'''
actual_retval = main([resource_path])
actual_out, actual_err = capsys.readouterr()

View File

@ -1,15 +1,10 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import io
from pre_commit_hooks.check_yaml import yaml
def test_readme_contains_all_hooks():
with io.open('README.md', encoding='UTF-8') as f:
with open('README.md', encoding='UTF-8') as f:
readme_contents = f.read()
with io.open('.pre-commit-hooks.yaml', encoding='UTF-8') as f:
with open('.pre-commit-hooks.yaml', encoding='UTF-8') as f:
hooks = yaml.load(f)
for hook in hooks:
assert '`{}`'.format(hook['id']) in readme_contents
assert f'`{hook["id"]}`' in readme_contents

View File

@ -1,6 +1,3 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import pytest

View File

@ -1,7 +1,3 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import textwrap
import pytest

View File

@ -1,6 +1,3 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from pre_commit_hooks.trailing_whitespace_fixer import main
@ -46,7 +43,7 @@ def test_fixes_markdown_files(tmpdir, ext):
'\t\n' # trailing tabs are stripped anyway
'\n ', # whitespace at the end of the file is removed
)
ret = main((path.strpath, '--markdown-linebreak-ext={}'.format(ext)))
ret = main((path.strpath, f'--markdown-linebreak-ext={ext}'))
assert ret == 1
assert path.read() == (
'foo \n'

View File

@ -1,6 +1,3 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from pre_commit_hooks.util import CalledProcessError

View File

@ -1,5 +1,5 @@
[tox]
envlist = py27,py36,py37,pypy,pypy3,pre-commit
envlist = py36,py37,py38,pypy3,pre-commit
[testenv]
deps = -rrequirements-dev.txt