# strings.py
  1. # Copyright (c) 2009-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
  2. # Copyright (c) 2010 Daniel Harding <dharding@gmail.com>
  3. # Copyright (c) 2012-2014 Google, Inc.
  4. # Copyright (c) 2013-2020 Claudiu Popa <pcmanticore@gmail.com>
  5. # Copyright (c) 2014 Brett Cannon <brett@python.org>
  6. # Copyright (c) 2014 Arun Persaud <arun@nubati.net>
  7. # Copyright (c) 2015 Rene Zhang <rz99@cornell.edu>
  8. # Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
  9. # Copyright (c) 2016, 2018 Jakub Wilk <jwilk@jwilk.net>
  10. # Copyright (c) 2016 Peter Dawyndt <Peter.Dawyndt@UGent.be>
  11. # Copyright (c) 2017 Łukasz Rogalski <rogalski.91@gmail.com>
  12. # Copyright (c) 2017 Ville Skyttä <ville.skytta@iki.fi>
  13. # Copyright (c) 2018, 2020 Anthony Sottile <asottile@umich.edu>
  14. # Copyright (c) 2018-2019 Lucas Cimon <lucas.cimon@gmail.com>
  15. # Copyright (c) 2018 Alan Chan <achan961117@gmail.com>
  16. # Copyright (c) 2018 Yury Gribov <tetra2005@gmail.com>
  17. # Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
  18. # Copyright (c) 2018 Nick Drozd <nicholasdrozd@gmail.com>
  19. # Copyright (c) 2019-2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
  20. # Copyright (c) 2019 Wes Turner <westurner@google.com>
  21. # Copyright (c) 2019 Djailla <bastien.vallet@gmail.com>
  22. # Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
  23. # Copyright (c) 2020 Matthew Suozzo <msuozzo@google.com>
  24. # Copyright (c) 2020 hippo91 <guillaume.peillex@gmail.com>
  25. # Copyright (c) 2020 谭九鼎 <109224573@qq.com>
  26. # Copyright (c) 2020 Anthony <tanant@users.noreply.github.com>
  27. # Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
  28. # Copyright (c) 2021 Tushar Sadhwani <tushar.sadhwani000@gmail.com>
  29. # Copyright (c) 2021 Jaehoon Hwang <jaehoonhwang@users.noreply.github.com>
  30. # Copyright (c) 2021 Daniël van Noord <13665637+DanielNoord@users.noreply.github.com>
  31. # Copyright (c) 2021 Peter Kolbus <peter.kolbus@garmin.com>
  32. # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
  33. # For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
  34. """Checker for string formatting operations.
  35. """
  36. import collections
  37. import numbers
  38. import re
  39. import tokenize
  40. from typing import Counter, Iterable
  41. import astroid
  42. from astroid import nodes
  43. from pylint.checkers import BaseChecker, BaseTokenChecker, utils
  44. from pylint.checkers.utils import check_messages
  45. from pylint.interfaces import IAstroidChecker, IRawChecker, ITokenChecker
# Fully-qualified type names astroid reports for string constants
# (Python 2 and Python 3 spellings).
_AST_NODE_STR_TYPES = ("__builtin__.unicode", "__builtin__.str", "builtins.str")

# Prefixes for both strings and bytes literals per
# https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals
_PREFIXES = {
    "r",
    "u",
    "R",
    "U",
    "f",
    "F",
    "fr",
    "Fr",
    "fR",
    "FR",
    "rf",
    "rF",
    "Rf",
    "RF",
    "b",
    "B",
    "br",
    "Br",
    "bR",
    "BR",
    "rb",
    "rB",
    "Rb",
    "RB",
}
# Matches an (optionally prefixed) triple-single-quote opener, e.g. r'''.
SINGLE_QUOTED_REGEX = re.compile(f"({'|'.join(_PREFIXES)})?'''")
# Matches an (optionally prefixed) triple-double-quote opener, e.g. b""".
DOUBLE_QUOTED_REGEX = re.compile(f"({'|'.join(_PREFIXES)})?\"\"\"")
# Matches the optional literal prefix plus the first quote character.
QUOTE_DELIMITER_REGEX = re.compile(f"({'|'.join(_PREFIXES)})?(\"|')", re.DOTALL)
# Message table shared by the checkers below: msgid -> (template, symbol, help).
MSGS = {  # pylint: disable=consider-using-namedtuple-or-dataclass
    "E1300": (
        "Unsupported format character %r (%#02x) at index %d",
        "bad-format-character",
        "Used when an unsupported format character is used in a format string.",
    ),
    "E1301": (
        "Format string ends in middle of conversion specifier",
        "truncated-format-string",
        "Used when a format string terminates before the end of a "
        "conversion specifier.",
    ),
    "E1302": (
        "Mixing named and unnamed conversion specifiers in format string",
        "mixed-format-string",
        "Used when a format string contains both named (e.g. '%(foo)d') "
        "and unnamed (e.g. '%d') conversion specifiers. This is also "
        "used when a named conversion specifier contains * for the "
        "minimum field width and/or precision.",
    ),
    "E1303": (
        "Expected mapping for format string, not %s",
        "format-needs-mapping",
        "Used when a format string that uses named conversion specifiers "
        "is used with an argument that is not a mapping.",
    ),
    "W1300": (
        "Format string dictionary key should be a string, not %s",
        "bad-format-string-key",
        "Used when a format string that uses named conversion specifiers "
        "is used with a dictionary whose keys are not all strings.",
    ),
    "W1301": (
        "Unused key %r in format string dictionary",
        "unused-format-string-key",
        "Used when a format string that uses named conversion specifiers "
        "is used with a dictionary that contains keys not required by the "
        "format string.",
    ),
    "E1304": (
        "Missing key %r in format string dictionary",
        "missing-format-string-key",
        "Used when a format string that uses named conversion specifiers "
        "is used with a dictionary that doesn't contain all the keys "
        "required by the format string.",
    ),
    "E1305": (
        "Too many arguments for format string",
        "too-many-format-args",
        "Used when a format string that uses unnamed conversion "
        "specifiers is given too many arguments.",
    ),
    "E1306": (
        "Not enough arguments for format string",
        "too-few-format-args",
        "Used when a format string that uses unnamed conversion "
        "specifiers is given too few arguments",
    ),
    "E1307": (
        "Argument %r does not match format type %r",
        "bad-string-format-type",
        "Used when a type required by format string "
        "is not suitable for actual argument type",
    ),
    "E1310": (
        "Suspicious argument in %s.%s call",
        "bad-str-strip-call",
        "The argument to a str.{l,r,}strip call contains a duplicate character, ",
    ),
    "W1302": (
        "Invalid format string",
        "bad-format-string",
        "Used when a PEP 3101 format string is invalid.",
    ),
    "W1303": (
        "Missing keyword argument %r for format string",
        "missing-format-argument-key",
        "Used when a PEP 3101 format string that uses named fields "
        "doesn't receive one or more required keywords.",
    ),
    "W1304": (
        "Unused format argument %r",
        "unused-format-string-argument",
        "Used when a PEP 3101 format string that uses named "
        "fields is used with an argument that "
        "is not required by the format string.",
    ),
    "W1305": (
        "Format string contains both automatic field numbering "
        "and manual field specification",
        "format-combined-specification",
        "Used when a PEP 3101 format string contains both automatic "
        "field numbering (e.g. '{}') and manual field "
        "specification (e.g. '{0}').",
    ),
    "W1306": (
        "Missing format attribute %r in format specifier %r",
        "missing-format-attribute",
        "Used when a PEP 3101 format string uses an "
        "attribute specifier ({0.length}), but the argument "
        "passed for formatting doesn't have that attribute.",
    ),
    "W1307": (
        "Using invalid lookup key %r in format specifier %r",
        "invalid-format-index",
        "Used when a PEP 3101 format string uses a lookup specifier "
        "({a[1]}), but the argument passed for formatting "
        "doesn't contain or doesn't have that key as an attribute.",
    ),
    "W1308": (
        "Duplicate string formatting argument %r, consider passing as named argument",
        "duplicate-string-formatting-argument",
        "Used when we detect that a string formatting is "
        "repeating an argument instead of using named string arguments",
    ),
    "W1309": (
        "Using an f-string that does not have any interpolated variables",
        "f-string-without-interpolation",
        "Used when we detect an f-string that does not use any interpolation variables, "
        "in which case it can be either a normal string or a bug in the code.",
    ),
    "W1310": (
        "Using formatting for a string that does not have any interpolated variables",
        "format-string-without-interpolation",
        "Used when we detect a string that does not have any interpolation variables, "
        "in which case it can be either a normal string without formatting or a bug in the code.",
    ),
}
# Node types that, used as the right-hand side of a %-format operation,
# are definitely not a mapping and not a tuple of arguments.
OTHER_NODES = (
    nodes.Const,
    nodes.List,
    nodes.Lambda,
    nodes.FunctionDef,
    nodes.ListComp,
    nodes.SetComp,
    nodes.GeneratorExp,
)
  215. def get_access_path(key, parts):
  216. """Given a list of format specifiers, returns
  217. the final access path (e.g. a.b.c[0][1]).
  218. """
  219. path = []
  220. for is_attribute, specifier in parts:
  221. if is_attribute:
  222. path.append(f".{specifier}")
  223. else:
  224. path.append(f"[{specifier!r}]")
  225. return str(key) + "".join(path)
  226. def arg_matches_format_type(arg_type, format_type):
  227. if format_type in "sr":
  228. # All types can be printed with %s and %r
  229. return True
  230. if isinstance(arg_type, astroid.Instance):
  231. arg_type = arg_type.pytype()
  232. if arg_type == "builtins.str":
  233. return format_type == "c"
  234. if arg_type == "builtins.float":
  235. return format_type in "deEfFgGn%"
  236. if arg_type == "builtins.int":
  237. # Integers allow all types
  238. return True
  239. return False
  240. return True
class StringFormatChecker(BaseChecker):
    """Checks string formatting operations to ensure that the format string
    is valid and the arguments match the format string.
    """

    __implements__ = (IAstroidChecker,)
    name = "string"
    msgs = MSGS

    # pylint: disable=too-many-branches
    @check_messages(
        "bad-format-character",
        "truncated-format-string",
        "mixed-format-string",
        "bad-format-string-key",
        "missing-format-string-key",
        "unused-format-string-key",
        "bad-string-format-type",
        "format-needs-mapping",
        "too-many-format-args",
        "too-few-format-args",
        "bad-string-format-type",
        "format-string-without-interpolation",
    )
    def visit_binop(self, node: nodes.BinOp) -> None:
        """Check %-style formatting: ``"fmt" % args``."""
        # Only the modulo operator with a literal string LHS is of interest.
        if node.op != "%":
            return
        left = node.left
        args = node.right

        if not (isinstance(left, nodes.Const) and isinstance(left.value, str)):
            return
        format_string = left.value
        try:
            (
                required_keys,
                required_num_args,
                required_key_types,
                required_arg_types,
            ) = utils.parse_format_string(format_string)
        except utils.UnsupportedFormatCharacter as exc:
            formatted = format_string[exc.index]
            self.add_message(
                "bad-format-character",
                node=node,
                args=(formatted, ord(formatted), exc.index),
            )
            return
        except utils.IncompleteFormatString:
            self.add_message("truncated-format-string", node=node)
            return
        if not required_keys and not required_num_args:
            # No conversion specifier at all: %-formatting is pointless here.
            self.add_message("format-string-without-interpolation", node=node)
            return
        if required_keys and required_num_args:
            # The format string uses both named and unnamed format
            # specifiers.
            self.add_message("mixed-format-string", node=node)
        elif required_keys:
            # The format string uses only named format specifiers.
            # Check that the RHS of the % operator is a mapping object
            # that contains precisely the set of keys required by the
            # format string.
            if isinstance(args, nodes.Dict):
                keys = set()
                unknown_keys = False
                for k, _ in args.items:
                    if isinstance(k, nodes.Const):
                        key = k.value
                        if isinstance(key, str):
                            keys.add(key)
                        else:
                            self.add_message(
                                "bad-format-string-key", node=node, args=key
                            )
                    else:
                        # One of the keys was something other than a
                        # constant. Since we can't tell what it is,
                        # suppress checks for missing keys in the
                        # dictionary.
                        unknown_keys = True
                if not unknown_keys:
                    for key in required_keys:
                        if key not in keys:
                            self.add_message(
                                "missing-format-string-key", node=node, args=key
                            )
                for key in keys:
                    if key not in required_keys:
                        self.add_message(
                            "unused-format-string-key", node=node, args=key
                        )
                # Check the inferred type of each value against the
                # conversion character used for its key.
                for key, arg in args.items:
                    if not isinstance(key, nodes.Const):
                        continue
                    format_type = required_key_types.get(key.value, None)
                    arg_type = utils.safe_infer(arg)
                    if (
                        format_type is not None
                        and arg_type
                        and arg_type != astroid.Uninferable
                        and not arg_matches_format_type(arg_type, format_type)
                    ):
                        self.add_message(
                            "bad-string-format-type",
                            node=node,
                            args=(arg_type.pytype(), format_type),
                        )
            elif isinstance(args, (OTHER_NODES, nodes.Tuple)):
                # Definitely not a mapping.
                type_name = type(args).__name__
                self.add_message("format-needs-mapping", node=node, args=type_name)
            # else:
            # The RHS of the format specifier is a name or
            # expression. It may be a mapping object, so
            # there's nothing we can check.
        else:
            # The format string uses only unnamed format specifiers.
            # Check that the number of arguments passed to the RHS of
            # the % operator matches the number required by the format
            # string.
            args_elts = []
            if isinstance(args, nodes.Tuple):
                rhs_tuple = utils.safe_infer(args)
                num_args = None
                if isinstance(rhs_tuple, nodes.BaseContainer):
                    args_elts = rhs_tuple.elts
                    num_args = len(args_elts)
            elif isinstance(args, (OTHER_NODES, (nodes.Dict, nodes.DictComp))):
                # A single non-tuple argument counts as one value.
                args_elts = [args]
                num_args = 1
            else:
                # The RHS of the format specifier is a name or
                # expression. It could be a tuple of unknown size, so
                # there's nothing we can check.
                num_args = None
            if num_args is not None:
                if num_args > required_num_args:
                    self.add_message("too-many-format-args", node=node)
                elif num_args < required_num_args:
                    self.add_message("too-few-format-args", node=node)
            for arg, format_type in zip(args_elts, required_arg_types):
                if not arg:
                    continue
                arg_type = utils.safe_infer(arg)
                if (
                    arg_type
                    and arg_type != astroid.Uninferable
                    and not arg_matches_format_type(arg_type, format_type)
                ):
                    self.add_message(
                        "bad-string-format-type",
                        node=node,
                        args=(arg_type.pytype(), format_type),
                    )

    @check_messages("f-string-without-interpolation")
    def visit_joinedstr(self, node: nodes.JoinedStr) -> None:
        self._check_interpolation(node)

    def _check_interpolation(self, node: nodes.JoinedStr) -> None:
        """Warn about f-strings that contain no interpolated value."""
        if isinstance(node.parent, nodes.FormattedValue):
            # Nested inside a format spec; the outer f-string is checked.
            return
        for value in node.values:
            if isinstance(value, nodes.FormattedValue):
                return
        self.add_message("f-string-without-interpolation", node=node)

    @check_messages(*MSGS)
    def visit_call(self, node: nodes.Call) -> None:
        """Check str.{l,r,}strip argument sanity and str.format calls."""
        func = utils.safe_infer(node.func)
        if (
            isinstance(func, astroid.BoundMethod)
            and isinstance(func.bound, astroid.Instance)
            and func.bound.name in {"str", "unicode", "bytes"}
        ):
            if func.name in {"strip", "lstrip", "rstrip"} and node.args:
                arg = utils.safe_infer(node.args[0])
                if not isinstance(arg, nodes.Const) or not isinstance(arg.value, str):
                    return
                # strip() takes a set of characters; a duplicate in the
                # literal is a likely sign the caller expected substring
                # removal instead.
                if len(arg.value) != len(set(arg.value)):
                    self.add_message(
                        "bad-str-strip-call",
                        node=node,
                        args=(func.bound.name, func.name),
                    )
            elif func.name == "format":
                self._check_new_format(node, func)

    def _detect_vacuous_formatting(self, node, positional_arguments):
        """Warn when the same name is passed positionally more than once."""
        counter = collections.Counter(
            arg.name for arg in positional_arguments if isinstance(arg, nodes.Name)
        )
        for name, count in counter.items():
            if count == 1:
                continue
            self.add_message(
                "duplicate-string-formatting-argument", node=node, args=(name,)
            )

    def _check_new_format(self, node, func):
        """Check the new string formatting."""
        # Skip format nodes which don't have an explicit string on the
        # left side of the format operation.
        # We do this because our inference engine can't properly handle
        # redefinitions of the original string.
        # Note that there may not be any left side at all, if the format method
        # has been assigned to another variable. See issue 351. For example:
        #
        #    fmt = 'some string {}'.format
        #    fmt('arg')
        if isinstance(node.func, nodes.Attribute) and not isinstance(
            node.func.expr, nodes.Const
        ):
            return
        if node.starargs or node.kwargs:
            # *args / **kwargs make the argument count unknowable.
            return
        try:
            strnode = next(func.bound.infer())
        except astroid.InferenceError:
            return
        if not (isinstance(strnode, nodes.Const) and isinstance(strnode.value, str)):
            return
        try:
            call_site = astroid.arguments.CallSite.from_call(node)
        except astroid.InferenceError:
            return
        try:
            fields, num_args, manual_pos = utils.parse_format_method_string(
                strnode.value
            )
        except utils.IncompleteFormatString:
            self.add_message("bad-format-string", node=node)
            return

        positional_arguments = call_site.positional_arguments
        named_arguments = call_site.keyword_arguments
        named_fields = {field[0] for field in fields if isinstance(field[0], str)}
        if num_args and manual_pos:
            self.add_message("format-combined-specification", node=node)
            return

        check_args = False
        # Consider "{[0]} {[1]}" as num_args.
        num_args += sum(1 for field in named_fields if field == "")
        if named_fields:
            for field in named_fields:
                if field and field not in named_arguments:
                    self.add_message(
                        "missing-format-argument-key", node=node, args=(field,)
                    )
            for field in named_arguments:
                if field not in named_fields:
                    self.add_message(
                        "unused-format-string-argument", node=node, args=(field,)
                    )
            # num_args can be 0 if manual_pos is not.
            num_args = num_args or manual_pos
            if positional_arguments or num_args:
                empty = any(field == "" for field in named_fields)
                if named_arguments or empty:
                    # Verify the required number of positional arguments
                    # only if the .format got at least one keyword argument.
                    # This means that the format strings accepts both
                    # positional and named fields and we should warn
                    # when one of the them is missing or is extra.
                    check_args = True
        else:
            check_args = True
        if check_args:
            # num_args can be 0 if manual_pos is not.
            num_args = num_args or manual_pos
            if not num_args:
                self.add_message("format-string-without-interpolation", node=node)
                return
            if len(positional_arguments) > num_args:
                self.add_message("too-many-format-args", node=node)
            elif len(positional_arguments) < num_args:
                self.add_message("too-few-format-args", node=node)
        self._detect_vacuous_formatting(node, positional_arguments)
        self._check_new_format_specifiers(node, fields, named_arguments)

    def _check_new_format_specifiers(self, node, fields, named):
        """
        Check attribute and index access in the format
        string ("{0.a}" and "{0[a]}").
        """
        for key, specifiers in fields:
            # Obtain the argument. If it can't be obtained
            # or inferred, skip this check.
            if key == "":
                # {[0]} will have an unnamed argument, defaulting
                # to 0. It will not be present in `named`, so use the value
                # 0 for it.
                key = 0
            if isinstance(key, numbers.Number):
                try:
                    argname = utils.get_argument_from_call(node, key)
                except utils.NoSuchArgumentError:
                    continue
            else:
                if key not in named:
                    continue
                argname = named[key]
            if argname in (astroid.Uninferable, None):
                continue
            try:
                argument = utils.safe_infer(argname)
            except astroid.InferenceError:
                continue
            if not specifiers or not argument:
                # No need to check this key if it doesn't
                # use attribute / item access
                continue
            if argument.parent and isinstance(argument.parent, nodes.Arguments):
                # Ignore any object coming from an argument,
                # because we can't infer its value properly.
                continue
            previous = argument
            parsed = []
            for is_attribute, specifier in specifiers:
                if previous is astroid.Uninferable:
                    break
                parsed.append((is_attribute, specifier))
                if is_attribute:
                    try:
                        previous = previous.getattr(specifier)[0]
                    except astroid.NotFoundError:
                        if (
                            hasattr(previous, "has_dynamic_getattr")
                            and previous.has_dynamic_getattr()
                        ):
                            # Don't warn if the object has a custom __getattr__
                            break
                        path = get_access_path(key, parsed)
                        self.add_message(
                            "missing-format-attribute",
                            args=(specifier, path),
                            node=node,
                        )
                        break
                else:
                    warn_error = False
                    if hasattr(previous, "getitem"):
                        try:
                            previous = previous.getitem(nodes.Const(specifier))
                        except (
                            astroid.AstroidIndexError,
                            astroid.AstroidTypeError,
                            astroid.AttributeInferenceError,
                        ):
                            warn_error = True
                        except astroid.InferenceError:
                            break
                        if previous is astroid.Uninferable:
                            break
                    else:
                        try:
                            # Lookup __getitem__ in the current node,
                            # but skip further checks, because we can't
                            # retrieve the looked object
                            previous.getattr("__getitem__")
                            break
                        except astroid.NotFoundError:
                            warn_error = True
                    if warn_error:
                        path = get_access_path(key, parsed)
                        self.add_message(
                            "invalid-format-index", args=(specifier, path), node=node
                        )
                        break

                try:
                    previous = next(previous.infer())
                except astroid.InferenceError:
                    # can't check further if we can't infer it
                    break
class StringConstantChecker(BaseTokenChecker):
    """Check string literals"""

    __implements__ = (IAstroidChecker, ITokenChecker, IRawChecker)
    name = "string"
    msgs = {
        "W1401": (
            "Anomalous backslash in string: '%s'. "
            "String constant might be missing an r prefix.",
            "anomalous-backslash-in-string",
            "Used when a backslash is in a literal string but not as an escape.",
        ),
        "W1402": (
            "Anomalous Unicode escape in byte string: '%s'. "
            "String constant might be missing an r or u prefix.",
            "anomalous-unicode-escape-in-string",
            "Used when an escape like \\u is encountered in a byte "
            "string where it has no effect.",
        ),
        "W1404": (
            "Implicit string concatenation found in %s",
            "implicit-str-concat",
            "String literals are implicitly concatenated in a "
            "literal iterable definition : "
            "maybe a comma is missing ?",
            {"old_names": [("W1403", "implicit-str-concat-in-sequence")]},
        ),
        "W1405": (
            "Quote delimiter %s is inconsistent with the rest of the file",
            "inconsistent-quotes",
            "Quote delimiters are not used consistently throughout a module "
            "(with allowances made for avoiding unnecessary escaping).",
        ),
        "W1406": (
            "The u prefix for strings is no longer necessary in Python >=3.0",
            "redundant-u-string-prefix",
            "Used when we detect a string with a u prefix. These prefixes were necessary "
            "in Python 2 to indicate a string was Unicode, but since Python 3.0 strings "
            "are Unicode by default.",
        ),
    }
    options = (
        (
            "check-str-concat-over-line-jumps",
            {
                "default": False,
                "type": "yn",
                "metavar": "<y or n>",
                "help": "This flag controls whether the "
                "implicit-str-concat should generate a warning "
                "on implicit string concatenation in sequences defined over "
                "several lines.",
            },
        ),
        (
            "check-quote-consistency",
            {
                "default": False,
                "type": "yn",
                "metavar": "<y or n>",
                "help": "This flag controls whether inconsistent-quotes generates a "
                "warning when the character used as a quote delimiter is used "
                "inconsistently within a module.",
            },
        ),
    )

    # Characters that have a special meaning after a backslash in either
    # Unicode or byte strings.
    ESCAPE_CHARACTERS = "abfnrtvx\n\r\t\\'\"01234567"

    # Characters that have a special meaning after a backslash but only in
    # Unicode strings.
    UNICODE_ESCAPE_CHARACTERS = "uUN"
    def __init__(self, *args, **kwargs):
        """Forward construction to BaseTokenChecker and set up token storage."""
        super().__init__(*args, **kwargs)
        # Maps a (row, col) token start position to
        # (parsed string value, next significant token).
        self.string_tokens = {}  # token position -> (token value, next token)
    def process_module(self, node: nodes.Module) -> None:
        """Remember whether the module enables `unicode_literals` via __future__."""
        self._unicode_literals = "unicode_literals" in node.future_imports
    def process_tokens(self, tokens):
        """Scan the token stream, checking and recording every STRING token.

        Each string token is validated via process_string_token, and its
        parsed value plus the next significant token are stored in
        self.string_tokens for the later implicit-concatenation checks.
        """
        encoding = "ascii"
        for i, (tok_type, token, start, _, line) in enumerate(tokens):
            if tok_type == tokenize.ENCODING:
                # this is always the first token processed
                encoding = token
            elif tok_type == tokenize.STRING:
                # 'token' is the whole un-parsed token; we can look at the start
                # of it to see whether it's a raw or unicode string etc.
                self.process_string_token(token, start[0], start[1])
                # We figure the next token, ignoring comments & newlines:
                j = i + 1
                while j < len(tokens) and tokens[j].type in (
                    tokenize.NEWLINE,
                    tokenize.NL,
                    tokenize.COMMENT,
                ):
                    j += 1
                next_token = tokens[j] if j < len(tokens) else None
                if encoding != "ascii":
                    # We convert `tokenize` character count into a byte count,
                    # to match with astroid `.col_offset`
                    start = (start[0], len(line[: start[1]].encode(encoding)))
                self.string_tokens[start] = (str_eval(token), next_token)

        if self.config.check_quote_consistency:
            self.check_for_consistent_string_delimiters(tokens)
    @check_messages("implicit-str-concat")
    def visit_list(self, node: nodes.List) -> None:
        """Check list elements for implicit string concatenation."""
        self.check_for_concatenated_strings(node.elts, "list")
    @check_messages("implicit-str-concat")
    def visit_set(self, node: nodes.Set) -> None:
        """Check set elements for implicit string concatenation."""
        self.check_for_concatenated_strings(node.elts, "set")
    @check_messages("implicit-str-concat")
    def visit_tuple(self, node: nodes.Tuple) -> None:
        """Check tuple elements for implicit string concatenation."""
        self.check_for_concatenated_strings(node.elts, "tuple")
    def visit_assign(self, node: nodes.Assign) -> None:
        """Check assignments of a plain string constant for implicit concatenation."""
        if isinstance(node.value, nodes.Const) and isinstance(node.value.value, str):
            self.check_for_concatenated_strings([node.value], "assignment")
  719. def check_for_consistent_string_delimiters(
  720. self, tokens: Iterable[tokenize.TokenInfo]
  721. ) -> None:
  722. """Adds a message for each string using inconsistent quote delimiters.
  723. Quote delimiters are used inconsistently if " and ' are mixed in a module's
  724. shortstrings without having done so to avoid escaping an internal quote
  725. character.
  726. Args:
  727. tokens: The tokens to be checked against for consistent usage.
  728. """
  729. string_delimiters: Counter[str] = collections.Counter()
  730. # First, figure out which quote character predominates in the module
  731. for tok_type, token, _, _, _ in tokens:
  732. if tok_type == tokenize.STRING and _is_quote_delimiter_chosen_freely(token):
  733. string_delimiters[_get_quote_delimiter(token)] += 1
  734. if len(string_delimiters) > 1:
  735. # Ties are broken arbitrarily
  736. most_common_delimiter = string_delimiters.most_common(1)[0][0]
  737. for tok_type, token, start, _, _ in tokens:
  738. if tok_type != tokenize.STRING:
  739. continue
  740. quote_delimiter = _get_quote_delimiter(token)
  741. if (
  742. _is_quote_delimiter_chosen_freely(token)
  743. and quote_delimiter != most_common_delimiter
  744. ):
  745. self.add_message(
  746. "inconsistent-quotes", line=start[0], args=(quote_delimiter,)
  747. )
  748. def check_for_concatenated_strings(self, elements, iterable_type):
  749. for elt in elements:
  750. if not (
  751. isinstance(elt, nodes.Const) and elt.pytype() in _AST_NODE_STR_TYPES
  752. ):
  753. continue
  754. if elt.col_offset < 0:
  755. # This can happen in case of escaped newlines
  756. continue
  757. if (elt.lineno, elt.col_offset) not in self.string_tokens:
  758. # This may happen with Latin1 encoding
  759. # cf. https://github.com/PyCQA/pylint/issues/2610
  760. continue
  761. matching_token, next_token = self.string_tokens[
  762. (elt.lineno, elt.col_offset)
  763. ]
  764. # We detect string concatenation: the AST Const is the
  765. # combination of 2 string tokens
  766. if matching_token != elt.value and next_token is not None:
  767. if next_token.type == tokenize.STRING and (
  768. next_token.start[0] == elt.lineno
  769. or self.config.check_str_concat_over_line_jumps
  770. ):
  771. self.add_message(
  772. "implicit-str-concat", line=elt.lineno, args=(iterable_type,)
  773. )
  774. def process_string_token(self, token, start_row, start_col):
  775. quote_char = None
  776. index = None
  777. for index, char in enumerate(token):
  778. if char in "'\"":
  779. quote_char = char
  780. break
  781. if quote_char is None:
  782. return
  783. prefix = token[:index].lower() # markers like u, b, r.
  784. after_prefix = token[index:]
  785. # Chop off quotes
  786. quote_length = (
  787. 3 if after_prefix[:3] == after_prefix[-3:] == 3 * quote_char else 1
  788. )
  789. string_body = after_prefix[quote_length:-quote_length]
  790. # No special checks on raw strings at the moment.
  791. if "r" not in prefix:
  792. self.process_non_raw_string_token(
  793. prefix,
  794. string_body,
  795. start_row,
  796. start_col + len(prefix) + quote_length,
  797. )
def process_non_raw_string_token(
    self, prefix, string_body, start_row, string_start_col
):
    """check for bad escapes in a non-raw string.

    prefix: lowercase string of eg 'ur' string prefix markers.
    string_body: the un-parsed body of the string, not including the quote
    marks.
    start_row: integer line number in the source.
    string_start_col: integer col number of the string start in the source.
    """
    # Walk through the string; if we see a backslash then escape the next
    # character, and skip over it. If we see a non-escaped character,
    # alert, and continue.
    #
    # Accept a backslash when it escapes a backslash, or a quote, or
    # end-of-line, or one of the letters that introduce a special escape
    # sequence <https://docs.python.org/reference/lexical_analysis.html>
    #
    index = 0
    while True:
        # Jump straight to the next backslash; everything between
        # backslashes is irrelevant to escape checking.
        index = string_body.find("\\", index)
        if index == -1:
            break
        # There must be a next character; having a backslash at the end
        # of the string would be a SyntaxError.
        next_char = string_body[index + 1]
        match = string_body[index : index + 2]
        # The column offset will vary depending on whether the string token
        # is broken across lines. Calculate relative to the nearest line
        # break or relative to the start of the token's line.
        last_newline = string_body.rfind("\n", 0, index)
        if last_newline == -1:
            # Still on the token's first line: offset from the token start.
            line = start_row
            col_offset = index + string_start_col
        else:
            # Past a line break: count the newlines to get the line, and
            # measure the column from the most recent newline.
            line = start_row + string_body.count("\n", 0, index)
            col_offset = index - last_newline - 1
        if next_char in self.UNICODE_ESCAPE_CHARACTERS:
            # \u/\N-style escapes: valid in str literals (explicit 'u'
            # prefix or no prefix at all); only bytes literals ('b'
            # prefix) get the anomalous-unicode-escape warning.
            if "u" in prefix:
                pass
            elif "b" not in prefix:
                pass  # unicode by default
            else:
                self.add_message(
                    "anomalous-unicode-escape-in-string",
                    line=line,
                    args=(match,),
                    col_offset=col_offset,
                )
        elif next_char not in self.ESCAPE_CHARACTERS:
            self.add_message(
                "anomalous-backslash-in-string",
                line=line,
                args=(match,),
                col_offset=col_offset,
            )
        # Whether it was a valid escape or not, backslash followed by
        # another character can always be consumed whole: the second
        # character can never be the start of a new backslash escape.
        index += 2
  858. @check_messages("redundant-u-string-prefix")
  859. def visit_const(self, node: nodes.Const) -> None:
  860. if node.pytype() == "builtins.str" and not isinstance(
  861. node.parent, nodes.JoinedStr
  862. ):
  863. self._detect_u_string_prefix(node)
  864. def _detect_u_string_prefix(self, node: nodes.Const):
  865. """Check whether strings include a 'u' prefix like u'String'"""
  866. if node.kind == "u":
  867. self.add_message(
  868. "redundant-u-string-prefix",
  869. line=node.lineno,
  870. col_offset=node.col_offset,
  871. )
  872. def register(linter):
  873. """required method to auto register this checker"""
  874. linter.register_checker(StringFormatChecker(linter))
  875. linter.register_checker(StringConstantChecker(linter))
  876. def str_eval(token):
  877. """
  878. Mostly replicate `ast.literal_eval(token)` manually to avoid any performance hit.
  879. This supports f-strings, contrary to `ast.literal_eval`.
  880. We have to support all string literal notations:
  881. https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals
  882. """
  883. if token[0:2].lower() in {"fr", "rf"}:
  884. token = token[2:]
  885. elif token[0].lower() in {"r", "u", "f"}:
  886. token = token[1:]
  887. if token[0:3] in {'"""', "'''"}:
  888. return token[3:-3]
  889. return token[1:-1]
  890. def _is_long_string(string_token: str) -> bool:
  891. """Is this string token a "longstring" (is it triple-quoted)?
  892. Long strings are triple-quoted as defined in
  893. https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals
  894. This function only checks characters up through the open quotes. Because it's meant
  895. to be applied only to tokens that represent string literals, it doesn't bother to
  896. check for close-quotes (demonstrating that the literal is a well-formed string).
  897. Args:
  898. string_token: The string token to be parsed.
  899. Returns:
  900. A boolean representing whether or not this token matches a longstring
  901. regex.
  902. """
  903. return bool(
  904. SINGLE_QUOTED_REGEX.match(string_token)
  905. or DOUBLE_QUOTED_REGEX.match(string_token)
  906. )
  907. def _get_quote_delimiter(string_token: str) -> str:
  908. """Returns the quote character used to delimit this token string.
  909. This function does little checking for whether the token is a well-formed
  910. string.
  911. Args:
  912. string_token: The token to be parsed.
  913. Returns:
  914. A string containing solely the first quote delimiter character in the passed
  915. string.
  916. Raises:
  917. ValueError: No quote delimiter characters are present.
  918. """
  919. match = QUOTE_DELIMITER_REGEX.match(string_token)
  920. if not match:
  921. raise ValueError(f"string token {string_token} is not a well-formed string")
  922. return match.group(2)
  923. def _is_quote_delimiter_chosen_freely(string_token: str) -> bool:
  924. """Was there a non-awkward option for the quote delimiter?
  925. Args:
  926. string_token: The quoted string whose delimiters are to be checked.
  927. Returns:
  928. Whether there was a choice in this token's quote character that would
  929. not have involved backslash-escaping an interior quote character. Long
  930. strings are excepted from this analysis under the assumption that their
  931. quote characters are set by policy.
  932. """
  933. quote_delimiter = _get_quote_delimiter(string_token)
  934. unchosen_delimiter = '"' if quote_delimiter == "'" else "'"
  935. return bool(
  936. quote_delimiter
  937. and not _is_long_string(string_token)
  938. and unchosen_delimiter not in str_eval(string_token)
  939. )