# Copyright (c) 2006, 2008-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2012 Ry4an Brase <ry4an-hg@ry4an.org>
# Copyright (c) 2012 Google, Inc.
# Copyright (c) 2012 Anthony VEREZ <anthony.verez.external@cassidian.com>
# Copyright (c) 2014-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2017, 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2017 Mikhail Fesenko <proggga@gmail.com>
# Copyright (c) 2018 Scott Worley <scottworley@scottworley.com>
# Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
# Copyright (c) 2019, 2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
# Copyright (c) 2019 Taewon D. Kim <kimt33@mcmaster.ca>
# Copyright (c) 2020-2021 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2020 Frank Harrison <frank@doublethefish.com>
# Copyright (c) 2020 Eli Fine <ejfine@gmail.com>
# Copyright (c) 2020 Shiv Venkatasubrahmanyam <shvenkat@users.noreply.github.com>
# Copyright (c) 2021 Daniël van Noord <13665637+DanielNoord@users.noreply.github.com>
# Copyright (c) 2021 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Copyright (c) 2021 Maksym Humetskyi <Humetsky@gmail.com>
# Copyright (c) 2021 bot <bot@noreply.github.com>
# Copyright (c) 2021 Aditya Gupta <adityagupta1089@users.noreply.github.com>

# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE

"""A similarities / code duplication command line tool and pylint checker.

The algorithm is based on comparing the hash value of n successive lines of a file.
First the files are read and any line that doesn't fulfill the requirements is removed
(comments, docstrings...).
Those stripped lines are stored in the LineSet class which gives access to them.
Then each index of the stripped lines collection is associated with the hash of n
successive entries of the stripped lines starting at the current index
(n is the minimum common lines option).
The common hashes between both linesets are then looked for. If there are matches, then
the match indices in both linesets are stored and associated with the corresponding
couples (start line number/end line number) in both files.
This association is then postprocessed to handle the case of successive matches. For
example if the minimum common lines setting is set to four, then the hashes are computed
with four lines. If one of the match index couples (12, 34) is the successor of another
one (11, 33) then it means that there are in fact five lines which are common.
Once postprocessed, the values of the association table are the result looked for, i.e.
the start and end line numbers of common lines in both files.
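
A minimal programmatic use of the ``Similar`` class defined below (an illustrative
sketch; the file names are hypothetical and the report is printed on stdout)::

    sim = Similar(min_lines=4, ignore_comments=True)
    for filename in ("file_1.py", "file_2.py"):
        with open(filename, encoding="utf-8") as stream:
            sim.append_stream(filename, stream)
    sim.run()  # prints each group of similar lines and a TOTAL summary line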
  40. """
import copy
import functools
import itertools
import operator
import re
import sys
from collections import defaultdict
from getopt import getopt
from io import BufferedIOBase, BufferedReader, BytesIO
from itertools import chain, groupby
from typing import (
    Any,
    Dict,
    FrozenSet,
    Generator,
    Iterable,
    List,
    NamedTuple,
    NewType,
    Optional,
    Set,
    TextIO,
    Tuple,
    Union,
)

import astroid
from astroid import nodes

from pylint.checkers import BaseChecker, MapReduceMixin, table_lines_from_stats
from pylint.interfaces import IRawChecker
from pylint.reporters.ureports.nodes import Table
from pylint.utils import LinterStats, decoding_stream

DEFAULT_MIN_SIMILARITY_LINE = 4

REGEX_FOR_LINES_WITH_CONTENT = re.compile(r".*\w+")

# Index defines a location in a LineSet stripped lines collection
Index = NewType("Index", int)

# LineNumber defines a location in a LineSet real lines collection (the whole file lines)
LineNumber = NewType("LineNumber", int)


# LineSpecifs holds characteristics of a line in a file
class LineSpecifs(NamedTuple):
    line_number: LineNumber
    text: str


# Links LinesChunk object to the starting indices (in lineset's stripped lines)
# of the different chunks of lines that are used to compute the hash
HashToIndex_T = Dict["LinesChunk", List[Index]]

# Links index in the lineset's stripped lines to the real lines in the file
IndexToLines_T = Dict[Index, "SuccessiveLinesLimits"]

# The types the streams read by pylint can take. Originating from astroid.nodes.Module.stream() and open()
STREAM_TYPES = Union[TextIO, BufferedReader, BytesIO]


class CplSuccessiveLinesLimits:
    """
    This class holds a couple of SuccessiveLinesLimits objects, one for each file compared,
    and a counter on the number of common lines between both stripped lines collections
    extracted from both files.
    """

    __slots__ = ("first_file", "second_file", "effective_cmn_lines_nb")

    def __init__(
        self,
        first_file: "SuccessiveLinesLimits",
        second_file: "SuccessiveLinesLimits",
        effective_cmn_lines_nb: int,
    ) -> None:
        self.first_file = first_file
        self.second_file = second_file
        self.effective_cmn_lines_nb = effective_cmn_lines_nb


# Links the indices of the starting line in both linesets' stripped lines to
# the start and end lines in both files
CplIndexToCplLines_T = Dict["LineSetStartCouple", CplSuccessiveLinesLimits]


class LinesChunk:
    """
    The LinesChunk object computes and stores the hash of some consecutive stripped lines of a lineset.
    """

    __slots__ = ("_fileid", "_index", "_hash")

    def __init__(self, fileid: str, num_line: int, *lines: Iterable[str]) -> None:
        self._fileid: str = fileid
        """The name of the file from which the LinesChunk object is generated."""

        self._index: Index = Index(num_line)
        """The index in the stripped lines that is the starting of consecutive lines."""

        self._hash: int = sum(hash(lin) for lin in lines)
        """The hash of some consecutive lines."""

    def __eq__(self, o: Any) -> bool:
        if not isinstance(o, LinesChunk):
            return NotImplemented
        return self._hash == o._hash

    def __hash__(self) -> int:
        return self._hash

    def __repr__(self) -> str:
        return (
            f"<LinesChunk object for file {self._fileid} ({self._index}, {self._hash})>"
        )

    def __str__(self) -> str:
        return (
            f"LinesChunk object for file {self._fileid}, starting at line {self._index} \n"
            f"Hash is {self._hash}"
        )


class SuccessiveLinesLimits:
    """
    A class to handle the numbering of begin and end of successive lines.

    :note: Only the end line number can be updated.
    """

    __slots__ = ("_start", "_end")

    def __init__(self, start: LineNumber, end: LineNumber) -> None:
        self._start: LineNumber = start
        self._end: LineNumber = end

    @property
    def start(self) -> LineNumber:
        return self._start

    @property
    def end(self) -> LineNumber:
        return self._end

    @end.setter
    def end(self, value: LineNumber) -> None:
        self._end = value

    def __repr__(self) -> str:
        return f"<SuccessiveLinesLimits <{self._start};{self._end}>>"


class LineSetStartCouple(NamedTuple):
    """
    Indices in both linesets that mark the beginning of successive lines.
    """

    fst_lineset_index: Index
    snd_lineset_index: Index

    def __repr__(self) -> str:
        return (
            f"<LineSetStartCouple <{self.fst_lineset_index};{self.snd_lineset_index}>>"
        )

    def __eq__(self, other) -> bool:
        if not isinstance(other, LineSetStartCouple):
            return NotImplemented
        return (
            self.fst_lineset_index == other.fst_lineset_index
            and self.snd_lineset_index == other.snd_lineset_index
        )

    def __hash__(self) -> int:
        return hash(self.fst_lineset_index) + hash(self.snd_lineset_index)

    def increment(self, value: Index) -> "LineSetStartCouple":
        return LineSetStartCouple(
            Index(self.fst_lineset_index + value),
            Index(self.snd_lineset_index + value),
        )


LinesChunkLimits_T = Tuple["LineSet", LineNumber, LineNumber]


def hash_lineset(
    lineset: "LineSet", min_common_lines: int = DEFAULT_MIN_SIMILARITY_LINE
) -> Tuple[HashToIndex_T, IndexToLines_T]:
    """
    Return two dicts. The first associates the hash of successive stripped lines of a lineset
    to the indices of the starting lines.
    The second dict associates the index of the starting line in the lineset's stripped lines
    to the couple [start, end] line numbers in the corresponding file.

    :param lineset: lineset object (i.e. the lines in a file)
    :param min_common_lines: number of successive lines that are used to compute the hash
    :return: a dict linking hashes to corresponding start index and a dict that links this
             index to the start and end lines in the file
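
    For illustration (a sketch with hypothetical toy values), with ``min_common_lines = 2``
    and a lineset whose stripped lines sit at line numbers 0, 1 and 2, the second dict is::

        {Index(0): <SuccessiveLinesLimits <0;2>>,
         Index(1): <SuccessiveLinesLimits <1;3>>}

    while the first dict maps the hash of each two-line chunk to ``[Index(0)]`` and
    ``[Index(1)]`` respectively (the hash values themselves vary from run to run).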
  192. """
  193. hash2index = defaultdict(list)
  194. index2lines = {}
  195. # Comments, docstring and other specific patterns maybe excluded -> call to stripped_lines
  196. # to get only what is desired
  197. lines = tuple(x.text for x in lineset.stripped_lines)
  198. # Need different iterators on same lines but each one is shifted 1 from the precedent
  199. shifted_lines = [iter(lines[i:]) for i in range(min_common_lines)]
  200. for index_i, *succ_lines in enumerate(zip(*shifted_lines)):
  201. start_linenumber = lineset.stripped_lines[index_i].line_number
  202. try:
  203. end_linenumber = lineset.stripped_lines[
  204. index_i + min_common_lines
  205. ].line_number
  206. except IndexError:
  207. end_linenumber = lineset.stripped_lines[-1].line_number + 1
  208. index = Index(index_i)
  209. index2lines[index] = SuccessiveLinesLimits(
  210. start=LineNumber(start_linenumber), end=LineNumber(end_linenumber)
  211. )
  212. l_c = LinesChunk(lineset.name, index, *succ_lines)
  213. hash2index[l_c].append(index)
  214. return hash2index, index2lines
  215. def remove_successives(all_couples: CplIndexToCplLines_T) -> None:
  216. """
  217. Removes all successive entries in the dictionary in argument
  218. :param all_couples: collection that has to be cleaned up from successives entries.
  219. The keys are couples of indices that mark the beginning of common entries
  220. in both linesets. The values have two parts. The first one is the couple
  221. of starting and ending line numbers of common successives lines in the first file.
  222. The second part is the same for the second file.
  223. For example consider the following dict:
  224. >>> all_couples
  225. {(11, 34): ([5, 9], [27, 31]),
  226. (23, 79): ([15, 19], [45, 49]),
  227. (12, 35): ([6, 10], [28, 32])}
  228. There are two successives keys (11, 34) and (12, 35).
  229. It means there are two consecutive similar chunks of lines in both files.
  230. Thus remove last entry and update the last line numbers in the first entry
  231. >>> remove_successives(all_couples)
  232. >>> all_couples
  233. {(11, 34): ([5, 10], [27, 32]),
  234. (23, 79): ([15, 19], [45, 49])}
  235. """
  236. couple: LineSetStartCouple
  237. for couple in tuple(all_couples.keys()):
  238. to_remove = []
  239. test = couple.increment(Index(1))
  240. while test in all_couples:
  241. all_couples[couple].first_file.end = all_couples[test].first_file.end
  242. all_couples[couple].second_file.end = all_couples[test].second_file.end
  243. all_couples[couple].effective_cmn_lines_nb += 1
  244. to_remove.append(test)
  245. test = test.increment(Index(1))
  246. for target in to_remove:
  247. try:
  248. all_couples.pop(target)
  249. except KeyError:
  250. pass
  251. def filter_noncode_lines(
  252. ls_1: "LineSet",
  253. stindex_1: Index,
  254. ls_2: "LineSet",
  255. stindex_2: Index,
  256. common_lines_nb: int,
  257. ) -> int:
  258. """
  259. Return the effective number of common lines between lineset1 and lineset2 filtered from non code lines, that is to say the number of
  260. common successive stripped lines except those that do not contain code (for example a ligne with only an
  261. ending parathensis)
  262. :param ls_1: first lineset
  263. :param stindex_1: first lineset starting index
  264. :param ls_2: second lineset
  265. :param stindex_2: second lineset starting index
  266. :param common_lines_nb: number of common successive stripped lines before being filtered from non code lines
  267. :return: the number of common successives stripped lines that contain code
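
    For example (an illustrative sketch), if both chunks of stripped lines are
    ``["if cond:", "do_something()", ")"]``, the line containing only a closing
    parenthesis has no word character, so it is ignored and the effective number
    of common lines is 2 rather than 3.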
  268. """
  269. stripped_l1 = [
  270. lspecif.text
  271. for lspecif in ls_1.stripped_lines[stindex_1 : stindex_1 + common_lines_nb]
  272. if REGEX_FOR_LINES_WITH_CONTENT.match(lspecif.text)
  273. ]
  274. stripped_l2 = [
  275. lspecif.text
  276. for lspecif in ls_2.stripped_lines[stindex_2 : stindex_2 + common_lines_nb]
  277. if REGEX_FOR_LINES_WITH_CONTENT.match(lspecif.text)
  278. ]
  279. return sum(sline_1 == sline_2 for sline_1, sline_2 in zip(stripped_l1, stripped_l2))
  280. class Commonality(NamedTuple):
  281. cmn_lines_nb: int
  282. fst_lset: "LineSet"
  283. fst_file_start: LineNumber
  284. fst_file_end: LineNumber
  285. snd_lset: "LineSet"
  286. snd_file_start: LineNumber
  287. snd_file_end: LineNumber
  288. class Similar:
  289. """finds copy-pasted lines of code in a project"""
  290. def __init__(
  291. self,
  292. min_lines: int = DEFAULT_MIN_SIMILARITY_LINE,
  293. ignore_comments: bool = False,
  294. ignore_docstrings: bool = False,
  295. ignore_imports: bool = False,
  296. ignore_signatures: bool = False,
  297. ) -> None:
  298. self.min_lines = min_lines
  299. self.ignore_comments = ignore_comments
  300. self.ignore_docstrings = ignore_docstrings
  301. self.ignore_imports = ignore_imports
  302. self.ignore_signatures = ignore_signatures
  303. self.linesets: List["LineSet"] = []
  304. def append_stream(
  305. self, streamid: str, stream: STREAM_TYPES, encoding: Optional[str] = None
  306. ) -> None:
  307. """append a file to search for similarities"""
  308. if isinstance(stream, BufferedIOBase):
  309. if encoding is None:
  310. raise ValueError
  311. readlines = decoding_stream(stream, encoding).readlines
  312. else:
  313. readlines = stream.readlines # type: ignore[assignment] # hint parameter is incorrectly typed as non-optional
  314. try:
  315. self.linesets.append(
  316. LineSet(
  317. streamid,
  318. readlines(),
  319. self.ignore_comments,
  320. self.ignore_docstrings,
  321. self.ignore_imports,
  322. self.ignore_signatures,
  323. )
  324. )
  325. except UnicodeDecodeError:
  326. pass
  327. def run(self) -> None:
  328. """start looking for similarities and display results on stdout"""
  329. if self.min_lines == 0:
  330. return
  331. self._display_sims(self._compute_sims())
  332. def _compute_sims(self) -> List[Tuple[int, Set[LinesChunkLimits_T]]]:
  333. """compute similarities in appended files"""
  334. no_duplicates: Dict[int, List[Set[LinesChunkLimits_T]]] = defaultdict(list)
  335. for commonality in self._iter_sims():
  336. num = commonality.cmn_lines_nb
  337. lineset1 = commonality.fst_lset
  338. start_line_1 = commonality.fst_file_start
  339. end_line_1 = commonality.fst_file_end
  340. lineset2 = commonality.snd_lset
  341. start_line_2 = commonality.snd_file_start
  342. end_line_2 = commonality.snd_file_end
  343. duplicate = no_duplicates[num]
  344. couples: Set[LinesChunkLimits_T]
  345. for couples in duplicate:
  346. if (lineset1, start_line_1, end_line_1) in couples or (
  347. lineset2,
  348. start_line_2,
  349. end_line_2,
  350. ) in couples:
  351. break
  352. else:
  353. duplicate.append(
  354. {
  355. (lineset1, start_line_1, end_line_1),
  356. (lineset2, start_line_2, end_line_2),
  357. }
  358. )
  359. sims: List[Tuple[int, Set[LinesChunkLimits_T]]] = []
  360. ensembles: List[Set[LinesChunkLimits_T]]
  361. for num, ensembles in no_duplicates.items():
  362. cpls: Set[LinesChunkLimits_T]
  363. for cpls in ensembles:
  364. sims.append((num, cpls))
  365. sims.sort()
  366. sims.reverse()
  367. return sims
  368. def _display_sims(
  369. self, similarities: List[Tuple[int, Set[LinesChunkLimits_T]]]
  370. ) -> None:
  371. """Display computed similarities on stdout"""
  372. report = self._get_similarity_report(similarities)
  373. print(report)
  374. def _get_similarity_report(
  375. self, similarities: List[Tuple[int, Set[LinesChunkLimits_T]]]
  376. ) -> str:
  377. """Create a report from similarities"""
  378. report: str = ""
  379. duplicated_line_number: int = 0
  380. for number, couples in similarities:
  381. report += f"\n{number} similar lines in {len(couples)} files\n"
  382. couples_l = sorted(couples)
  383. line_set = start_line = end_line = None
  384. for line_set, start_line, end_line in couples_l:
  385. report += f"=={line_set.name}:[{start_line}:{end_line}]\n"
  386. if line_set:
  387. for line in line_set._real_lines[start_line:end_line]:
  388. report += f" {line.rstrip()}\n" if line.rstrip() else "\n"
  389. duplicated_line_number += number * (len(couples_l) - 1)
  390. total_line_number: int = sum(len(lineset) for lineset in self.linesets)
  391. report += f"TOTAL lines={total_line_number} duplicates={duplicated_line_number} percent={duplicated_line_number * 100.0 / total_line_number:.2f}\n"
  392. return report
  393. def _find_common(
  394. self, lineset1: "LineSet", lineset2: "LineSet"
  395. ) -> Generator[Commonality, None, None]:
  396. """
  397. Find similarities in the two given linesets.
  398. This the core of the algorithm.
  399. The idea is to compute the hashes of a minimal number of successive lines of each lineset and then compare the hashes.
  400. Every match of such comparison is stored in a dict that links the couple of starting indices in both linesets to
  401. the couple of corresponding starting and ending lines in both files.
  402. Last regroups all successive couples in a bigger one. It allows to take into account common chunk of lines that have more
  403. than the minimal number of successive lines required.
  404. """
  405. hash_to_index_1: HashToIndex_T
  406. hash_to_index_2: HashToIndex_T
  407. index_to_lines_1: IndexToLines_T
  408. index_to_lines_2: IndexToLines_T
  409. hash_to_index_1, index_to_lines_1 = hash_lineset(lineset1, self.min_lines)
  410. hash_to_index_2, index_to_lines_2 = hash_lineset(lineset2, self.min_lines)
  411. hash_1: FrozenSet[LinesChunk] = frozenset(hash_to_index_1.keys())
  412. hash_2: FrozenSet[LinesChunk] = frozenset(hash_to_index_2.keys())
  413. common_hashes: Iterable[LinesChunk] = sorted(
  414. hash_1 & hash_2, key=lambda m: hash_to_index_1[m][0]
  415. )
  416. # all_couples is a dict that links the couple of indices in both linesets that mark the beginning of
  417. # successive common lines, to the corresponding starting and ending number lines in both files
  418. all_couples: CplIndexToCplLines_T = {}
  419. for c_hash in sorted(common_hashes, key=operator.attrgetter("_index")):
  420. for indices_in_linesets in itertools.product(
  421. hash_to_index_1[c_hash], hash_to_index_2[c_hash]
  422. ):
  423. index_1 = indices_in_linesets[0]
  424. index_2 = indices_in_linesets[1]
  425. all_couples[
  426. LineSetStartCouple(index_1, index_2)
  427. ] = CplSuccessiveLinesLimits(
  428. copy.copy(index_to_lines_1[index_1]),
  429. copy.copy(index_to_lines_2[index_2]),
  430. effective_cmn_lines_nb=self.min_lines,
  431. )
  432. remove_successives(all_couples)
  433. for cml_stripped_l, cmn_l in all_couples.items():
  434. start_index_1 = cml_stripped_l.fst_lineset_index
  435. start_index_2 = cml_stripped_l.snd_lineset_index
  436. nb_common_lines = cmn_l.effective_cmn_lines_nb
  437. com = Commonality(
  438. cmn_lines_nb=nb_common_lines,
  439. fst_lset=lineset1,
  440. fst_file_start=cmn_l.first_file.start,
  441. fst_file_end=cmn_l.first_file.end,
  442. snd_lset=lineset2,
  443. snd_file_start=cmn_l.second_file.start,
  444. snd_file_end=cmn_l.second_file.end,
  445. )
  446. eff_cmn_nb = filter_noncode_lines(
  447. lineset1, start_index_1, lineset2, start_index_2, nb_common_lines
  448. )
  449. if eff_cmn_nb > self.min_lines:
  450. yield com
  451. def _iter_sims(self) -> Generator[Commonality, None, None]:
  452. """iterate on similarities among all files, by making a cartesian
  453. product
  454. """
  455. for idx, lineset in enumerate(self.linesets[:-1]):
  456. for lineset2 in self.linesets[idx + 1 :]:
  457. yield from self._find_common(lineset, lineset2)
  458. def get_map_data(self):
  459. """Returns the data we can use for a map/reduce process
  460. In this case we are returning this instance's Linesets, that is all file
  461. information that will later be used for vectorisation.
  462. """
  463. return self.linesets
  464. def combine_mapreduce_data(self, linesets_collection):
  465. """Reduces and recombines data into a format that we can report on
  466. The partner function of get_map_data()"""
  467. self.linesets = [line for lineset in linesets_collection for line in lineset]
  468. def stripped_lines(
  469. lines: Iterable[str],
  470. ignore_comments: bool,
  471. ignore_docstrings: bool,
  472. ignore_imports: bool,
  473. ignore_signatures: bool,
  474. ) -> List[LineSpecifs]:
  475. """
  476. Return tuples of line/line number/line type with leading/trailing whitespace and any ignored code features removed
  477. :param lines: a collection of lines
  478. :param ignore_comments: if true, any comment in the lines collection is removed from the result
  479. :param ignore_docstrings: if true, any line that is a docstring is removed from the result
  480. :param ignore_imports: if true, any line that is an import is removed from the result
  481. :param ignore_signatures: if true, any line that is part of a function signature is removed from the result
  482. :return: the collection of line/line number/line type tuples
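
    For example (an illustrative sketch), with ``ignore_comments=True`` the line
    ``"x = 1  # set x"`` is kept with the stripped text ``"x = 1"``, while a line
    containing only a comment is stripped to an empty string and therefore dropped
    from the result.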
  483. """
  484. if ignore_imports or ignore_signatures:
  485. tree = astroid.parse("".join(lines))
  486. if ignore_imports:
  487. node_is_import_by_lineno = (
  488. (node.lineno, isinstance(node, (nodes.Import, nodes.ImportFrom)))
  489. for node in tree.body
  490. )
  491. line_begins_import = {
  492. lineno: all(is_import for _, is_import in node_is_import_group)
  493. for lineno, node_is_import_group in groupby(
  494. node_is_import_by_lineno, key=lambda x: x[0]
  495. )
  496. }
  497. current_line_is_import = False
  498. if ignore_signatures:
  499. def _get_functions(
  500. functions: List[nodes.NodeNG], tree: nodes.NodeNG
  501. ) -> List[nodes.NodeNG]:
  502. """Recursively get all functions including nested in the classes from the tree."""
  503. for node in tree.body:
  504. if isinstance(node, (nodes.FunctionDef, nodes.AsyncFunctionDef)):
  505. functions.append(node)
  506. if isinstance(
  507. node,
  508. (nodes.ClassDef, nodes.FunctionDef, nodes.AsyncFunctionDef),
  509. ):
  510. _get_functions(functions, node)
  511. return functions
  512. functions = _get_functions([], tree)
  513. signature_lines = set(
  514. chain(
  515. *(
  516. range(
  517. func.lineno,
  518. func.body[0].lineno if func.body else func.tolineno + 1,
  519. )
  520. for func in functions
  521. )
  522. )
  523. )
  524. strippedlines = []
  525. docstring = None
  526. for lineno, line in enumerate(lines, start=1):
  527. line = line.strip()
  528. if ignore_docstrings:
  529. if not docstring:
  530. if line.startswith('"""') or line.startswith("'''"):
  531. docstring = line[:3]
  532. line = line[3:]
  533. elif line.startswith('r"""') or line.startswith("r'''"):
  534. docstring = line[1:4]
  535. line = line[4:]
  536. if docstring:
  537. if line.endswith(docstring):
  538. docstring = None
  539. line = ""
  540. if ignore_imports:
  541. current_line_is_import = line_begins_import.get(
  542. lineno, current_line_is_import
  543. )
  544. if current_line_is_import:
  545. line = ""
  546. if ignore_comments:
  547. line = line.split("#", 1)[0].strip()
  548. if ignore_signatures and lineno in signature_lines:
  549. line = ""
  550. if line:
  551. strippedlines.append(
  552. LineSpecifs(text=line, line_number=LineNumber(lineno - 1))
  553. )
  554. return strippedlines
  555. @functools.total_ordering
  556. class LineSet:
  557. """
  558. Holds and indexes all the lines of a single source file.
  559. Allows for correspondence between real lines of the source file and stripped ones, which
  560. are the real ones from which undesired patterns have been removed.
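
    For example (an illustrative sketch), a ``LineSet`` built with ``ignore_comments=True``
    from a two-line file whose second line is only a comment still has ``len(line_set) == 2``
    (the real lines), while ``line_set.stripped_lines`` contains a single ``LineSpecifs`` entry.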
  561. """
  562. def __init__(
  563. self,
  564. name: str,
  565. lines: List[str],
  566. ignore_comments: bool = False,
  567. ignore_docstrings: bool = False,
  568. ignore_imports: bool = False,
  569. ignore_signatures: bool = False,
  570. ) -> None:
  571. self.name = name
  572. self._real_lines = lines
  573. self._stripped_lines = stripped_lines(
  574. lines, ignore_comments, ignore_docstrings, ignore_imports, ignore_signatures
  575. )
  576. def __str__(self):
  577. return f"<Lineset for {self.name}>"
  578. def __len__(self):
  579. return len(self._real_lines)
  580. def __getitem__(self, index):
  581. return self._stripped_lines[index]
  582. def __lt__(self, other):
  583. return self.name < other.name
  584. def __hash__(self):
  585. return id(self)
  586. def __eq__(self, other):
  587. if not isinstance(other, LineSet):
  588. return False
  589. return self.__dict__ == other.__dict__
  590. @property
  591. def stripped_lines(self):
  592. return self._stripped_lines
  593. @property
  594. def real_lines(self):
  595. return self._real_lines
  596. MSGS = {
  597. "R0801": (
  598. "Similar lines in %s files\n%s",
  599. "duplicate-code",
  600. "Indicates that a set of similar lines has been detected "
  601. "among multiple file. This usually means that the code should "
  602. "be refactored to avoid this duplication.",
  603. )
  604. }
  605. def report_similarities(
  606. sect,
  607. stats: LinterStats,
  608. old_stats: Optional[LinterStats],
  609. ) -> None:
  610. """make a layout with some stats about duplication"""
  611. lines = ["", "now", "previous", "difference"]
  612. lines += table_lines_from_stats(stats, old_stats, "duplicated_lines")
  613. sect.append(Table(children=lines, cols=4, rheaders=1, cheaders=1))
  614. # wrapper to get a pylint checker from the similar class
  615. class SimilarChecker(BaseChecker, Similar, MapReduceMixin):
  616. """checks for similarities and duplicated code. This computation may be
  617. memory / CPU intensive, so you should disable it if you experiment some
  618. problems.
  619. """
  620. __implements__ = (IRawChecker,)
  621. # configuration section name
  622. name = "similarities"
  623. # messages
  624. msgs = MSGS
  625. # configuration options
  626. # for available dict keys/values see the optik parser 'add_option' method
  627. options = (
  628. (
  629. "min-similarity-lines",
  630. {
  631. "default": DEFAULT_MIN_SIMILARITY_LINE,
  632. "type": "int",
  633. "metavar": "<int>",
  634. "help": "Minimum lines number of a similarity.",
  635. },
  636. ),
  637. (
  638. "ignore-comments",
  639. {
  640. "default": True,
  641. "type": "yn",
  642. "metavar": "<y or n>",
  643. "help": "Comments are removed from the similarity computation",
  644. },
  645. ),
  646. (
  647. "ignore-docstrings",
  648. {
  649. "default": True,
  650. "type": "yn",
  651. "metavar": "<y or n>",
  652. "help": "Docstrings are removed from the similarity computation",
  653. },
  654. ),
  655. (
  656. "ignore-imports",
  657. {
  658. "default": False,
  659. "type": "yn",
  660. "metavar": "<y or n>",
  661. "help": "Imports are removed from the similarity computation",
  662. },
  663. ),
  664. (
  665. "ignore-signatures",
  666. {
  667. "default": False,
  668. "type": "yn",
  669. "metavar": "<y or n>",
  670. "help": "Signatures are removed from the similarity computation",
  671. },
  672. ),
  673. )
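
    # A typical pylintrc configuration for this checker (an illustrative sketch;
    # the option names are those declared in the tuple above and the section name
    # matches the checker name):
    #
    #   [SIMILARITIES]
    #   min-similarity-lines=6
    #   ignore-comments=yes
    #   ignore-docstrings=yes
    #   ignore-imports=no
    #   ignore-signatures=no
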
    # reports
    reports = (("RP0801", "Duplication", report_similarities),)

    def __init__(self, linter=None) -> None:
        BaseChecker.__init__(self, linter)
        Similar.__init__(
            self,
            min_lines=self.config.min_similarity_lines,
            ignore_comments=self.config.ignore_comments,
            ignore_docstrings=self.config.ignore_docstrings,
            ignore_imports=self.config.ignore_imports,
            ignore_signatures=self.config.ignore_signatures,
        )

    def set_option(self, optname, value, action=None, optdict=None):
        """Method called to set an option (registered in the options list).

        Overridden to report options setting to Similar.
        """
        BaseChecker.set_option(self, optname, value, action, optdict)
        if optname == "min-similarity-lines":
            self.min_lines = self.config.min_similarity_lines
        elif optname == "ignore-comments":
            self.ignore_comments = self.config.ignore_comments
        elif optname == "ignore-docstrings":
            self.ignore_docstrings = self.config.ignore_docstrings
        elif optname == "ignore-imports":
            self.ignore_imports = self.config.ignore_imports
        elif optname == "ignore-signatures":
            self.ignore_signatures = self.config.ignore_signatures

    def open(self):
        """Init the checkers: reset linesets and statistics information."""
        self.linesets = []
        self.linter.stats.reset_duplicated_lines()

    def process_module(self, node: nodes.Module) -> None:
        """Process a module.

        The module's content is accessible via the stream object.

        stream must implement the readlines method
        """
        with node.stream() as stream:
            self.append_stream(self.linter.current_name, stream, node.file_encoding)

    def close(self):
        """Compute and display similarities on closing (i.e. end of parsing)."""
        total = sum(len(lineset) for lineset in self.linesets)
        duplicated = 0
        stats = self.linter.stats
        for num, couples in self._compute_sims():
            msg = []
            lineset = start_line = end_line = None
            for lineset, start_line, end_line in couples:
                msg.append(f"=={lineset.name}:[{start_line}:{end_line}]")
            msg.sort()

            if lineset:
                for line in lineset.real_lines[start_line:end_line]:
                    msg.append(line.rstrip())

            self.add_message("R0801", args=(len(couples), "\n".join(msg)))
            duplicated += num * (len(couples) - 1)
        stats.nb_duplicated_lines += int(duplicated)
        stats.percent_duplicated_lines += float(total and duplicated * 100.0 / total)

    def get_map_data(self):
        """Passthru override."""
        return Similar.get_map_data(self)

    def reduce_map_data(self, linter, data):
        """Reduces and recombines data into a format that we can report on.

        The partner function of get_map_data().
        """
        recombined = SimilarChecker(linter)
        recombined.min_lines = self.min_lines
        recombined.ignore_comments = self.ignore_comments
        recombined.ignore_docstrings = self.ignore_docstrings
        recombined.ignore_imports = self.ignore_imports
        recombined.ignore_signatures = self.ignore_signatures
        recombined.open()
        Similar.combine_mapreduce_data(recombined, linesets_collection=data)
        recombined.close()


def register(linter):
    """Required method to auto register this checker."""
    linter.register_checker(SimilarChecker(linter))


def usage(status=0):
    """Display command line usage information."""
    print("finds copy-pasted blocks in a set of files")
    print()
    print(
        "Usage: symilar [-d|--duplicates min_duplicated_lines] \
[-i|--ignore-comments] [--ignore-docstrings] [--ignore-imports] [--ignore-signatures] file1..."
    )
    sys.exit(status)
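

# Example standalone invocations (an illustrative sketch; the ``symilar`` entry point
# ships with pylint, and running this module directly also works since it has a
# ``__main__`` guard below):
#
#   symilar --duplicates=5 --ignore-comments file_1.py file_2.py
#   python -m pylint.checkers.similar --ignore-imports file_1.py file_2.py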


def Run(argv=None):
    """Standalone command line access point."""
    if argv is None:
        argv = sys.argv[1:]

    s_opts = "hdi"
    l_opts = (
        "help",
        "duplicates=",
        "ignore-comments",
        "ignore-imports",
        "ignore-docstrings",
        "ignore-signatures",
    )
    min_lines = DEFAULT_MIN_SIMILARITY_LINE
    ignore_comments = False
    ignore_docstrings = False
    ignore_imports = False
    ignore_signatures = False
    opts, args = getopt(argv, s_opts, l_opts)
    for opt, val in opts:
        if opt in {"-d", "--duplicates"}:
            min_lines = int(val)
        elif opt in {"-h", "--help"}:
            usage()
        elif opt in {"-i", "--ignore-comments"}:
            ignore_comments = True
        elif opt in {"--ignore-docstrings"}:
            ignore_docstrings = True
        elif opt in {"--ignore-imports"}:
            ignore_imports = True
        elif opt in {"--ignore-signatures"}:
            ignore_signatures = True
    if not args:
        usage(1)
    sim = Similar(
        min_lines, ignore_comments, ignore_docstrings, ignore_imports, ignore_signatures
    )
    for filename in args:
        with open(filename, encoding="utf-8") as stream:
            sim.append_stream(filename, stream)
    sim.run()
    sys.exit(0)


if __name__ == "__main__":
    Run()