autopep8.py

#!/usr/bin/env python

# Copyright (C) 2010-2011 Hideo Hattori
# Copyright (C) 2011-2013 Hideo Hattori, Steven Myint
# Copyright (C) 2013-2016 Hideo Hattori, Steven Myint, Bill Wendling
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# Copyright (C) 2006-2009 Johann C. Rocholl <johann@rocholl.net>
# Copyright (C) 2009-2013 Florent Xicluna <florent.xicluna@gmail.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Automatically formats Python code to conform to the PEP 8 style guide.

Fixes that only need be done once can be added by adding a function of the form
"fix_<code>(source)" to this module. They should return the fixed source code.
These fixes are picked up by apply_global_fixes().

Fixes that depend on pycodestyle should be added as methods to FixPEP8. See the
class documentation for more information.

"""
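
# Illustrative sketch only (not part of autopep8's source): a one-shot
# "global" fix of the kind described in the docstring above is simply a
# module-level function named fix_<code>(source) that returns the fixed
# source text, for example:
#
#     def fix_w000(source):
#         """Hypothetical fix picked up by apply_global_fixes()."""
#         return source.replace('\t', '    ')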

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import argparse
import codecs
import collections
import copy
import difflib
import fnmatch
import inspect
import io
import itertools
import keyword
import locale
import os
import re
import signal
import sys
import textwrap
import token
import tokenize
import warnings
import ast
try:
    from configparser import ConfigParser as SafeConfigParser
    from configparser import Error
except ImportError:
    from ConfigParser import SafeConfigParser
    from ConfigParser import Error

import pycodestyle
from pycodestyle import STARTSWITH_INDENT_STATEMENT_REGEX

try:
    unicode
except NameError:
    unicode = str


__version__ = '1.6.0'


CR = '\r'
LF = '\n'
CRLF = '\r\n'


PYTHON_SHEBANG_REGEX = re.compile(r'^#!.*\bpython[23]?\b\s*$')
LAMBDA_REGEX = re.compile(r'([\w.]+)\s=\slambda\s*([)(=\w,\s.]*):')
COMPARE_NEGATIVE_REGEX = re.compile(r'\b(not)\s+([^][)(}{]+?)\s+(in|is)\s')
COMPARE_NEGATIVE_REGEX_THROUGH = re.compile(r'\b(not\s+in|is\s+not)\s')
BARE_EXCEPT_REGEX = re.compile(r'except\s*:')
STARTSWITH_DEF_REGEX = re.compile(r'^(async\s+def|def)\s.*\):')
DOCSTRING_START_REGEX = re.compile(r'^u?r?(?P<kind>["\']{3})')
ENABLE_REGEX = re.compile(r'# *(fmt|autopep8): *on')
DISABLE_REGEX = re.compile(r'# *(fmt|autopep8): *off')

EXIT_CODE_OK = 0
EXIT_CODE_ERROR = 1
EXIT_CODE_EXISTS_DIFF = 2
EXIT_CODE_ARGPARSE_ERROR = 99

# For generating line shortening candidates.
SHORTEN_OPERATOR_GROUPS = frozenset([
    frozenset([',']),
    frozenset(['%']),
    frozenset([',', '(', '[', '{']),
    frozenset(['%', '(', '[', '{']),
    frozenset([',', '(', '[', '{', '%', '+', '-', '*', '/', '//']),
    frozenset(['%', '+', '-', '*', '/', '//']),
])

DEFAULT_IGNORE = 'E226,E24,W50,W690'  # TODO: use pycodestyle.DEFAULT_IGNORE
DEFAULT_INDENT_SIZE = 4
# these fixes conflict with each other, if the `--ignore` setting causes both
# to be enabled, disable both of them
CONFLICTING_CODES = ('W503', 'W504')

SELECTED_GLOBAL_FIXED_METHOD_CODES = ['W602', ]

# W602 is handled separately due to the need to avoid "with_traceback".
CODE_TO_2TO3 = {
    'E231': ['ws_comma'],
    'E721': ['idioms'],
    'W601': ['has_key'],
    'W603': ['ne'],
    'W604': ['repr'],
    'W690': ['apply',
             'except',
             'exitfunc',
             'numliterals',
             'operator',
             'paren',
             'reduce',
             'renames',
             'standarderror',
             'sys_exc',
             'throw',
             'tuple_params',
             'xreadlines']}

if sys.platform == 'win32':  # pragma: no cover
    DEFAULT_CONFIG = os.path.expanduser(r'~\.pycodestyle')
else:
    DEFAULT_CONFIG = os.path.join(os.getenv('XDG_CONFIG_HOME') or
                                  os.path.expanduser('~/.config'),
                                  'pycodestyle')
# fallback, use .pep8
if not os.path.exists(DEFAULT_CONFIG):  # pragma: no cover
    if sys.platform == 'win32':
        DEFAULT_CONFIG = os.path.expanduser(r'~\.pep8')
    else:
        DEFAULT_CONFIG = os.path.join(os.path.expanduser('~/.config'), 'pep8')
PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep8', '.flake8')

MAX_PYTHON_FILE_DETECTION_BYTES = 1024


def open_with_encoding(filename, mode='r', encoding=None, limit_byte_check=-1):
    """Return opened file with a specific encoding."""
    if not encoding:
        encoding = detect_encoding(filename, limit_byte_check=limit_byte_check)

    return io.open(filename, mode=mode, encoding=encoding,
                   newline='')  # Preserve line endings


def detect_encoding(filename, limit_byte_check=-1):
    """Return file encoding."""
    try:
        with open(filename, 'rb') as input_file:
            from lib2to3.pgen2 import tokenize as lib2to3_tokenize
            encoding = lib2to3_tokenize.detect_encoding(input_file.readline)[0]

        with open_with_encoding(filename, encoding=encoding) as test_file:
            test_file.read(limit_byte_check)

        return encoding
    except (LookupError, SyntaxError, UnicodeDecodeError):
        return 'latin-1'


def readlines_from_file(filename):
    """Return contents of file."""
    with open_with_encoding(filename) as input_file:
        return input_file.readlines()


def extended_blank_lines(logical_line,
                         blank_lines,
                         blank_before,
                         indent_level,
                         previous_logical):
    """Check for missing blank lines after class declaration."""
    if previous_logical.startswith('def '):
        if blank_lines and pycodestyle.DOCSTRING_REGEX.match(logical_line):
            yield (0, 'E303 too many blank lines ({})'.format(blank_lines))
    elif pycodestyle.DOCSTRING_REGEX.match(previous_logical):
        # Missing blank line between class docstring and method declaration.
        if (
            indent_level and
            not blank_lines and
            not blank_before and
            logical_line.startswith(('def ')) and
            '(self' in logical_line
        ):
            yield (0, 'E301 expected 1 blank line, found 0')


pycodestyle.register_check(extended_blank_lines)


def continued_indentation(logical_line, tokens, indent_level, hang_closing,
                          indent_char, noqa):
    """Override pycodestyle's function to provide indentation information."""
    first_row = tokens[0][2][0]
    nrows = 1 + tokens[-1][2][0] - first_row
    if noqa or nrows == 1:
        return

    # indent_next tells us whether the next block is indented. Assuming
    # that it is indented by 4 spaces, then we should not allow 4-space
    # indents on the final continuation line. In turn, some other
    # indents are allowed to have an extra 4 spaces.
    indent_next = logical_line.endswith(':')

    row = depth = 0
    valid_hangs = (
        (DEFAULT_INDENT_SIZE,)
        if indent_char != '\t' else (DEFAULT_INDENT_SIZE,
                                     2 * DEFAULT_INDENT_SIZE)
    )

    # Remember how many brackets were opened on each line.
    parens = [0] * nrows

    # Relative indents of physical lines.
    rel_indent = [0] * nrows

    # For each depth, collect a list of opening rows.
    open_rows = [[0]]

    # For each depth, memorize the hanging indentation.
    hangs = [None]

    # Visual indents.
    indent_chances = {}
    last_indent = tokens[0][2]
    indent = [last_indent[1]]

    last_token_multiline = None
    line = None
    last_line = ''
    last_line_begins_with_multiline = False
    for token_type, text, start, end, line in tokens:

        newline = row < start[0] - first_row
        if newline:
            row = start[0] - first_row
            newline = (not last_token_multiline and
                       token_type not in (tokenize.NL, tokenize.NEWLINE))
            last_line_begins_with_multiline = last_token_multiline

        if newline:
            # This is the beginning of a continuation line.
            last_indent = start

            # Record the initial indent.
            rel_indent[row] = pycodestyle.expand_indent(line) - indent_level

            # Identify closing bracket.
            close_bracket = (token_type == tokenize.OP and text in ']})')

            # Is the indent relative to an opening bracket line?
            for open_row in reversed(open_rows[depth]):
                hang = rel_indent[row] - rel_indent[open_row]
                hanging_indent = hang in valid_hangs
                if hanging_indent:
                    break
            if hangs[depth]:
                hanging_indent = (hang == hangs[depth])

            visual_indent = (not close_bracket and hang > 0 and
                             indent_chances.get(start[1]))

            if close_bracket and indent[depth]:
                # Closing bracket for visual indent.
                if start[1] != indent[depth]:
                    yield (start, 'E124 {}'.format(indent[depth]))
            elif close_bracket and not hang:
                # closing bracket matches indentation of opening bracket's line
                if hang_closing:
                    yield (start, 'E133 {}'.format(indent[depth]))
            elif indent[depth] and start[1] < indent[depth]:
                if visual_indent is not True:
                    # Visual indent is broken.
                    yield (start, 'E128 {}'.format(indent[depth]))
            elif (hanging_indent or
                  (indent_next and
                   rel_indent[row] == 2 * DEFAULT_INDENT_SIZE)):
                # Hanging indent is verified.
                if close_bracket and not hang_closing:
                    yield (start, 'E123 {}'.format(indent_level +
                                                   rel_indent[open_row]))
                hangs[depth] = hang
            elif visual_indent is True:
                # Visual indent is verified.
                indent[depth] = start[1]
            elif visual_indent in (text, unicode):
                # Ignore token lined up with matching one from a previous line.
                pass
            else:
                one_indented = (indent_level + rel_indent[open_row] +
                                DEFAULT_INDENT_SIZE)
                # Indent is broken.
                if hang <= 0:
                    error = ('E122', one_indented)
                elif indent[depth]:
                    error = ('E127', indent[depth])
                elif not close_bracket and hangs[depth]:
                    error = ('E131', one_indented)
                elif hang > DEFAULT_INDENT_SIZE:
                    error = ('E126', one_indented)
                else:
                    hangs[depth] = hang
                    error = ('E121', one_indented)

                yield (start, '{} {}'.format(*error))

        # Look for visual indenting.
        if (
            parens[row] and
            token_type not in (tokenize.NL, tokenize.COMMENT) and
            not indent[depth]
        ):
            indent[depth] = start[1]
            indent_chances[start[1]] = True
        # Deal with implicit string concatenation.
        elif (token_type in (tokenize.STRING, tokenize.COMMENT) or
              text in ('u', 'ur', 'b', 'br')):
            indent_chances[start[1]] = unicode
        # Special case for the "if" statement because len("if (") is equal to
        # 4.
        elif not indent_chances and not row and not depth and text == 'if':
            indent_chances[end[1] + 1] = True
        elif text == ':' and line[end[1]:].isspace():
            open_rows[depth].append(row)

        # Keep track of bracket depth.
        if token_type == tokenize.OP:
            if text in '([{':
                depth += 1
                indent.append(0)
                hangs.append(None)
                if len(open_rows) == depth:
                    open_rows.append([])
                open_rows[depth].append(row)
                parens[row] += 1
            elif text in ')]}' and depth > 0:
                # Parent indents should not be more than this one.
                prev_indent = indent.pop() or last_indent[1]
                hangs.pop()
                for d in range(depth):
                    if indent[d] > prev_indent:
                        indent[d] = 0
                for ind in list(indent_chances):
                    if ind >= prev_indent:
                        del indent_chances[ind]
                del open_rows[depth + 1:]
                depth -= 1
                if depth:
                    indent_chances[indent[depth]] = True
                for idx in range(row, -1, -1):
                    if parens[idx]:
                        parens[idx] -= 1
                        break
            assert len(indent) == depth + 1
            if (
                start[1] not in indent_chances and
                # This is for purposes of speeding up E121 (GitHub #90).
                not last_line.rstrip().endswith(',')
            ):
                # Allow to line up tokens.
                indent_chances[start[1]] = text

        last_token_multiline = (start[0] != end[0])
        if last_token_multiline:
            rel_indent[end[0] - first_row] = rel_indent[row]

        last_line = line

    if (
        indent_next and
        not last_line_begins_with_multiline and
        pycodestyle.expand_indent(line) == indent_level + DEFAULT_INDENT_SIZE
    ):
        pos = (start[0], indent[0] + 4)
        desired_indent = indent_level + 2 * DEFAULT_INDENT_SIZE
        if visual_indent:
            yield (pos, 'E129 {}'.format(desired_indent))
        else:
            yield (pos, 'E125 {}'.format(desired_indent))


del pycodestyle._checks['logical_line'][pycodestyle.continued_indentation]
pycodestyle.register_check(continued_indentation)


class FixPEP8(object):

    """Fix invalid code.

    Fixer methods are prefixed "fix_". The _fix_source() method looks for these
    automatically.

    The fixer method can take either one or two arguments (in addition to
    self). The first argument is "result", which is the error information from
    pycodestyle. The second argument, "logical", is required only for
    logical-line fixes.

    The fixer method can return the list of modified lines or None. An empty
    list would mean that no changes were made. None would mean that only the
    line reported in the pycodestyle error was modified. Note that the modified
    line numbers that are returned are indexed at 1. This typically would
    correspond with the line number reported in the pycodestyle error
    information.

    [fixed method list]
        - e111,e114,e115,e116
        - e121,e122,e123,e124,e125,e126,e127,e128,e129
        - e201,e202,e203
        - e211
        - e221,e222,e223,e224,e225
        - e231
        - e251,e252
        - e261,e262
        - e271,e272,e273,e274
        - e301,e302,e303,e304,e305,e306
        - e401,e402
        - e502
        - e701,e702,e703,e704
        - e711,e712,e713,e714
        - e722
        - e731
        - w291
        - w503,504

    """
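
    # Illustrative sketch only (not part of autopep8's source): a minimal
    # physical-line fixer that follows the contract described in the class
    # docstring might look like the lines below; returning None tells
    # _fix_source() that only the reported line was modified.
    #
    #     def fix_e000(self, result):
    #         """Hypothetical example fixer."""
    #         line_index = result['line'] - 1
    #         self.source[line_index] = self.source[line_index].rstrip() + '\n'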

    def __init__(self, filename,
                 options,
                 contents=None,
                 long_line_ignore_cache=None):
        self.filename = filename
        if contents is None:
            self.source = readlines_from_file(filename)
        else:
            sio = io.StringIO(contents)
            self.source = sio.readlines()
        self.options = options
        self.indent_word = _get_indentword(''.join(self.source))

        # collect imports line
        self.imports = {}
        for i, line in enumerate(self.source):
            if (line.find("import ") == 0 or line.find("from ") == 0) and \
                    line not in self.imports:
                # collect only import statements that first appeared
                self.imports[line] = i

        self.long_line_ignore_cache = (
            set() if long_line_ignore_cache is None
            else long_line_ignore_cache)

        # Many fixers are the same even though pycodestyle categorizes them
        # differently.
        self.fix_e115 = self.fix_e112
        self.fix_e121 = self._fix_reindent
        self.fix_e122 = self._fix_reindent
        self.fix_e123 = self._fix_reindent
        self.fix_e124 = self._fix_reindent
        self.fix_e126 = self._fix_reindent
        self.fix_e127 = self._fix_reindent
        self.fix_e128 = self._fix_reindent
        self.fix_e129 = self._fix_reindent
        self.fix_e133 = self.fix_e131
        self.fix_e202 = self.fix_e201
        self.fix_e203 = self.fix_e201
        self.fix_e211 = self.fix_e201
        self.fix_e221 = self.fix_e271
        self.fix_e222 = self.fix_e271
        self.fix_e223 = self.fix_e271
        self.fix_e226 = self.fix_e225
        self.fix_e227 = self.fix_e225
        self.fix_e228 = self.fix_e225
        self.fix_e241 = self.fix_e271
        self.fix_e242 = self.fix_e224
        self.fix_e252 = self.fix_e225
        self.fix_e261 = self.fix_e262
        self.fix_e272 = self.fix_e271
        self.fix_e273 = self.fix_e271
        self.fix_e274 = self.fix_e271
        self.fix_e306 = self.fix_e301
        self.fix_e501 = (
            self.fix_long_line_logically if
            options and (options.aggressive >= 2 or options.experimental) else
            self.fix_long_line_physically)
        self.fix_e703 = self.fix_e702
        self.fix_w292 = self.fix_w291
        self.fix_w293 = self.fix_w291

    def _fix_source(self, results):
        try:
            (logical_start, logical_end) = _find_logical(self.source)
            logical_support = True
        except (SyntaxError, tokenize.TokenError):  # pragma: no cover
            logical_support = False

        completed_lines = set()
        for result in sorted(results, key=_priority_key):
            if result['line'] in completed_lines:
                continue

            fixed_methodname = 'fix_' + result['id'].lower()
            if hasattr(self, fixed_methodname):
                fix = getattr(self, fixed_methodname)

                line_index = result['line'] - 1
                original_line = self.source[line_index]

                is_logical_fix = len(_get_parameters(fix)) > 2
                if is_logical_fix:
                    logical = None
                    if logical_support:
                        logical = _get_logical(self.source,
                                               result,
                                               logical_start,
                                               logical_end)
                        if logical and set(range(
                                logical[0][0] + 1,
                                logical[1][0] + 1)).intersection(
                                    completed_lines):
                            continue

                    modified_lines = fix(result, logical)
                else:
                    modified_lines = fix(result)

                if modified_lines is None:
                    # Force logical fixes to report what they modified.
                    assert not is_logical_fix

                    if self.source[line_index] == original_line:
                        modified_lines = []

                if modified_lines:
                    completed_lines.update(modified_lines)
                elif modified_lines == []:  # Empty list means no fix
                    if self.options.verbose >= 2:
                        print(
                            '---> Not fixing {error} on line {line}'.format(
                                error=result['id'], line=result['line']),
                            file=sys.stderr)
                else:  # We assume one-line fix when None.
                    completed_lines.add(result['line'])
            else:
                if self.options.verbose >= 3:
                    print(
                        "---> '{}' is not defined.".format(fixed_methodname),
                        file=sys.stderr)

                    info = result['info'].strip()
                    print('---> {}:{}:{}:{}'.format(self.filename,
                                                    result['line'],
                                                    result['column'],
                                                    info),
                          file=sys.stderr)

    def fix(self):
        """Return a version of the source code with PEP 8 violations fixed."""
        pep8_options = {
            'ignore': self.options.ignore,
            'select': self.options.select,
            'max_line_length': self.options.max_line_length,
            'hang_closing': self.options.hang_closing,
        }
        results = _execute_pep8(pep8_options, self.source)

        if self.options.verbose:
            progress = {}
            for r in results:
                if r['id'] not in progress:
                    progress[r['id']] = set()
                progress[r['id']].add(r['line'])
            print('---> {n} issue(s) to fix {progress}'.format(
                n=len(results), progress=progress), file=sys.stderr)

        if self.options.line_range:
            start, end = self.options.line_range
            results = [r for r in results
                       if start <= r['line'] <= end]

        self._fix_source(filter_results(source=''.join(self.source),
                                        results=results,
                                        aggressive=self.options.aggressive))

        if self.options.line_range:
            # If number of lines has changed then change line_range.
            count = sum(sline.count('\n')
                        for sline in self.source[start - 1:end])
            self.options.line_range[1] = start + count - 1

        return ''.join(self.source)

    def _fix_reindent(self, result):
        """Fix a badly indented line.

        This is done by adding or removing from its initial indent only.

        """
        num_indent_spaces = int(result['info'].split()[1])
        line_index = result['line'] - 1
        target = self.source[line_index]

        self.source[line_index] = ' ' * num_indent_spaces + target.lstrip()

    def fix_e112(self, result):
        """Fix under-indented comments."""
        line_index = result['line'] - 1
        target = self.source[line_index]

        if not target.lstrip().startswith('#'):
            # Don't screw with invalid syntax.
            return []

        self.source[line_index] = self.indent_word + target

    def fix_e113(self, result):
        """Fix unexpected indentation."""
        line_index = result['line'] - 1
        target = self.source[line_index]
        indent = _get_indentation(target)
        stripped = target.lstrip()
        self.source[line_index] = indent[1:] + stripped

    def fix_e116(self, result):
        """Fix over-indented comments."""
        line_index = result['line'] - 1
        target = self.source[line_index]
        indent = _get_indentation(target)
        stripped = target.lstrip()

        if not stripped.startswith('#'):
            # Don't screw with invalid syntax.
            return []

        self.source[line_index] = indent[1:] + stripped

    def fix_e117(self, result):
        """Fix over-indented."""
        line_index = result['line'] - 1
        target = self.source[line_index]
        indent = _get_indentation(target)
        if indent == '\t':
            return []
        stripped = target.lstrip()
        self.source[line_index] = indent[1:] + stripped

    def fix_e125(self, result):
        """Fix indentation undistinguish from the next logical line."""
        num_indent_spaces = int(result['info'].split()[1])
        line_index = result['line'] - 1
        target = self.source[line_index]

        spaces_to_add = num_indent_spaces - len(_get_indentation(target))
        indent = len(_get_indentation(target))
        modified_lines = []

        while len(_get_indentation(self.source[line_index])) >= indent:
            self.source[line_index] = (' ' * spaces_to_add +
                                       self.source[line_index])
            modified_lines.append(1 + line_index)  # Line indexed at 1.
            line_index -= 1

        return modified_lines

    def fix_e131(self, result):
        """Fix indentation undistinguish from the next logical line."""
        num_indent_spaces = int(result['info'].split()[1])
        line_index = result['line'] - 1
        target = self.source[line_index]
        spaces_to_add = num_indent_spaces - len(_get_indentation(target))
        indent_length = len(_get_indentation(target))
        spaces_to_add = num_indent_spaces - indent_length
        if num_indent_spaces == 0 and indent_length == 0:
            spaces_to_add = 4

        if spaces_to_add >= 0:
            self.source[line_index] = (' ' * spaces_to_add +
                                       self.source[line_index])
        else:
            offset = abs(spaces_to_add)
            self.source[line_index] = self.source[line_index][offset:]

    def fix_e201(self, result):
        """Remove extraneous whitespace."""
        line_index = result['line'] - 1
        target = self.source[line_index]
        offset = result['column'] - 1

        fixed = fix_whitespace(target,
                               offset=offset,
                               replacement='')

        self.source[line_index] = fixed

    def fix_e224(self, result):
        """Remove extraneous whitespace around operator."""
        target = self.source[result['line'] - 1]
        offset = result['column'] - 1
        fixed = target[:offset] + target[offset:].replace('\t', ' ')
        self.source[result['line'] - 1] = fixed

    def fix_e225(self, result):
        """Fix missing whitespace around operator."""
        target = self.source[result['line'] - 1]
        offset = result['column'] - 1
        fixed = target[:offset] + ' ' + target[offset:]

        # Only proceed if non-whitespace characters match.
        # And make sure we don't break the indentation.
        if (
            fixed.replace(' ', '') == target.replace(' ', '') and
            _get_indentation(fixed) == _get_indentation(target)
        ):
            self.source[result['line'] - 1] = fixed
            error_code = result.get('id', 0)
            try:
                ts = generate_tokens(fixed)
            except (SyntaxError, tokenize.TokenError):
                return
            if not check_syntax(fixed.lstrip()):
                return
            errors = list(
                pycodestyle.missing_whitespace_around_operator(fixed, ts))
            for e in reversed(errors):
                if error_code != e[1].split()[0]:
                    continue
                offset = e[0][1]
                fixed = fixed[:offset] + ' ' + fixed[offset:]
            self.source[result['line'] - 1] = fixed
        else:
            return []

    def fix_e231(self, result):
        """Add missing whitespace."""
        line_index = result['line'] - 1
        target = self.source[line_index]
        offset = result['column']
        fixed = target[:offset].rstrip() + ' ' + target[offset:].lstrip()
        self.source[line_index] = fixed

    def fix_e251(self, result):
        """Remove whitespace around parameter '=' sign."""
        line_index = result['line'] - 1
        target = self.source[line_index]

        # This is necessary since pycodestyle sometimes reports columns that
        # goes past the end of the physical line. This happens in cases like,
        # foo(bar\n=None)
        c = min(result['column'] - 1,
                len(target) - 1)

        if target[c].strip():
            fixed = target
        else:
            fixed = target[:c].rstrip() + target[c:].lstrip()

        # There could be an escaped newline
        #
        # def foo(a=\
        #         1)
        if fixed.endswith(('=\\\n', '=\\\r\n', '=\\\r')):
            self.source[line_index] = fixed.rstrip('\n\r \t\\')
            self.source[line_index + 1] = self.source[line_index + 1].lstrip()
            return [line_index + 1, line_index + 2]  # Line indexed at 1

        self.source[result['line'] - 1] = fixed

    def fix_e262(self, result):
        """Fix spacing after comment hash."""
        target = self.source[result['line'] - 1]
        offset = result['column']

        code = target[:offset].rstrip(' \t#')
        comment = target[offset:].lstrip(' \t#')

        fixed = code + ('  # ' + comment if comment.strip() else '\n')

        self.source[result['line'] - 1] = fixed

    def fix_e271(self, result):
        """Fix extraneous whitespace around keywords."""
        line_index = result['line'] - 1
        target = self.source[line_index]
        offset = result['column'] - 1

        fixed = fix_whitespace(target,
                               offset=offset,
                               replacement=' ')

        if fixed == target:
            return []
        else:
            self.source[line_index] = fixed

    def fix_e301(self, result):
        """Add missing blank line."""
        cr = '\n'
        self.source[result['line'] - 1] = cr + self.source[result['line'] - 1]

    def fix_e302(self, result):
        """Add missing 2 blank lines."""
        add_linenum = 2 - int(result['info'].split()[-1])
        offset = 1
        if self.source[result['line'] - 2].strip() == "\\":
            offset = 2
        cr = '\n' * add_linenum
        self.source[result['line'] - offset] = (
            cr + self.source[result['line'] - offset]
        )

    def fix_e303(self, result):
        """Remove extra blank lines."""
        delete_linenum = int(result['info'].split('(')[1].split(')')[0]) - 2
        delete_linenum = max(1, delete_linenum)

        # We need to count because pycodestyle reports an offset line number if
        # there are comments.
        cnt = 0
        line = result['line'] - 2
        modified_lines = []
        while cnt < delete_linenum and line >= 0:
            if not self.source[line].strip():
                self.source[line] = ''
                modified_lines.append(1 + line)  # Line indexed at 1
                cnt += 1
            line -= 1

        return modified_lines

    def fix_e304(self, result):
        """Remove blank line following function decorator."""
        line = result['line'] - 2
        if not self.source[line].strip():
            self.source[line] = ''

    def fix_e305(self, result):
        """Add missing 2 blank lines after end of function or class."""
        add_delete_linenum = 2 - int(result['info'].split()[-1])
        cnt = 0
        offset = result['line'] - 2
        modified_lines = []
        if add_delete_linenum < 0:
            # delete cr
            add_delete_linenum = abs(add_delete_linenum)
            while cnt < add_delete_linenum and offset >= 0:
                if not self.source[offset].strip():
                    self.source[offset] = ''
                    modified_lines.append(1 + offset)  # Line indexed at 1
                    cnt += 1
                offset -= 1
        else:
            # add cr
            cr = '\n'
            # check comment line
            while True:
                if offset < 0:
                    break
                line = self.source[offset].lstrip()
                if not line:
                    break
                if line[0] != '#':
                    break
                offset -= 1
            offset += 1
            self.source[offset] = cr + self.source[offset]
            modified_lines.append(1 + offset)  # Line indexed at 1.

        return modified_lines

    def fix_e401(self, result):
        """Put imports on separate lines."""
        line_index = result['line'] - 1
        target = self.source[line_index]
        offset = result['column'] - 1

        if not target.lstrip().startswith('import'):
            return []

        indentation = re.split(pattern=r'\bimport\b',
                               string=target, maxsplit=1)[0]
        fixed = (target[:offset].rstrip('\t ,') + '\n' +
                 indentation + 'import ' + target[offset:].lstrip('\t ,'))
        self.source[line_index] = fixed

    def fix_e402(self, result):
        """Fix module-level import not at top of file."""
        (line_index, offset, target) = get_index_offset_contents(result,
                                                                 self.source)
        for i in range(1, 100):
            line = "".join(self.source[line_index:line_index+i])
            try:
                generate_tokens("".join(line))
            except (SyntaxError, tokenize.TokenError):
                continue
            break
        if not (target in self.imports and self.imports[target] != line_index):
            mod_offset = get_module_imports_on_top_of_file(self.source,
                                                           line_index)
            self.source[mod_offset] = line + self.source[mod_offset]
        for offset in range(i):
            self.source[line_index+offset] = ''

    def fix_long_line_logically(self, result, logical):
        """Try to make lines fit within --max-line-length characters."""
        if (
            not logical or
            len(logical[2]) == 1 or
            self.source[result['line'] - 1].lstrip().startswith('#')
        ):
            return self.fix_long_line_physically(result)

        start_line_index = logical[0][0]
        end_line_index = logical[1][0]
        logical_lines = logical[2]

        previous_line = get_item(self.source, start_line_index - 1, default='')
        next_line = get_item(self.source, end_line_index + 1, default='')

        single_line = join_logical_line(''.join(logical_lines))

        try:
            fixed = self.fix_long_line(
                target=single_line,
                previous_line=previous_line,
                next_line=next_line,
                original=''.join(logical_lines))
        except (SyntaxError, tokenize.TokenError):
            return self.fix_long_line_physically(result)

        if fixed:
            for line_index in range(start_line_index, end_line_index + 1):
                self.source[line_index] = ''
            self.source[start_line_index] = fixed
            return range(start_line_index + 1, end_line_index + 1)

        return []

    def fix_long_line_physically(self, result):
        """Try to make lines fit within --max-line-length characters."""
        line_index = result['line'] - 1
        target = self.source[line_index]

        previous_line = get_item(self.source, line_index - 1, default='')
        next_line = get_item(self.source, line_index + 1, default='')

        try:
            fixed = self.fix_long_line(
                target=target,
                previous_line=previous_line,
                next_line=next_line,
                original=target)
        except (SyntaxError, tokenize.TokenError):
            return []

        if fixed:
            self.source[line_index] = fixed
            return [line_index + 1]

        return []

    def fix_long_line(self, target, previous_line,
                      next_line, original):
        cache_entry = (target, previous_line, next_line)
        if cache_entry in self.long_line_ignore_cache:
            return []

        if target.lstrip().startswith('#'):
            if self.options.aggressive:
                # Wrap commented lines.
                return shorten_comment(
                    line=target,
                    max_line_length=self.options.max_line_length,
                    last_comment=not next_line.lstrip().startswith('#'))
            return []

        fixed = get_fixed_long_line(
            target=target,
            previous_line=previous_line,
            original=original,
            indent_word=self.indent_word,
            max_line_length=self.options.max_line_length,
            aggressive=self.options.aggressive,
            experimental=self.options.experimental,
            verbose=self.options.verbose)

        if fixed and not code_almost_equal(original, fixed):
            return fixed

        self.long_line_ignore_cache.add(cache_entry)
        return None

    def fix_e502(self, result):
        """Remove extraneous escape of newline."""
        (line_index, _, target) = get_index_offset_contents(result,
                                                            self.source)
        self.source[line_index] = target.rstrip('\n\r \t\\') + '\n'

    def fix_e701(self, result):
        """Put colon-separated compound statement on separate lines."""
        line_index = result['line'] - 1
        target = self.source[line_index]
        c = result['column']

        fixed_source = (target[:c] + '\n' +
                        _get_indentation(target) + self.indent_word +
                        target[c:].lstrip('\n\r \t\\'))
        self.source[result['line'] - 1] = fixed_source
        return [result['line'], result['line'] + 1]

    def fix_e702(self, result, logical):
        """Put semicolon-separated compound statement on separate lines."""
        if not logical:
            return []  # pragma: no cover
        logical_lines = logical[2]

        # Avoid applying this when indented.
        # https://docs.python.org/reference/compound_stmts.html
        for line in logical_lines:
            if (result['id'] == 'E702' and ':' in line
                    and STARTSWITH_INDENT_STATEMENT_REGEX.match(line)):
                if self.options.verbose:
                    print(
                        '---> avoid fixing {error} with '
                        'other compound statements'.format(error=result['id']),
                        file=sys.stderr
                    )
                return []

        line_index = result['line'] - 1
        target = self.source[line_index]

        if target.rstrip().endswith('\\'):
            # Normalize '1; \\\n2' into '1; 2'.
            self.source[line_index] = target.rstrip('\n \r\t\\')
            self.source[line_index + 1] = self.source[line_index + 1].lstrip()
            return [line_index + 1, line_index + 2]

        if target.rstrip().endswith(';'):
            self.source[line_index] = target.rstrip('\n \r\t;') + '\n'
            return [line_index + 1]

        offset = result['column'] - 1
        first = target[:offset].rstrip(';').rstrip()
        second = (_get_indentation(logical_lines[0]) +
                  target[offset:].lstrip(';').lstrip())

        # Find inline comment.
        inline_comment = None
        if target[offset:].lstrip(';').lstrip()[:2] == '# ':
            inline_comment = target[offset:].lstrip(';')

        if inline_comment:
            self.source[line_index] = first + inline_comment
        else:
            self.source[line_index] = first + '\n' + second
        return [line_index + 1]

    def fix_e704(self, result):
        """Fix multiple statements on one line def"""
        (line_index, _, target) = get_index_offset_contents(result,
                                                            self.source)
        match = STARTSWITH_DEF_REGEX.match(target)
        if match:
            self.source[line_index] = '{}\n{}{}'.format(
                match.group(0),
                _get_indentation(target) + self.indent_word,
                target[match.end(0):].lstrip())

    def fix_e711(self, result):
        """Fix comparison with None."""
        (line_index, offset, target) = get_index_offset_contents(result,
                                                                 self.source)

        right_offset = offset + 2
        if right_offset >= len(target):
            return []

        left = target[:offset].rstrip()
        center = target[offset:right_offset]
        right = target[right_offset:].lstrip()

        if center.strip() == '==':
            new_center = 'is'
        elif center.strip() == '!=':
            new_center = 'is not'
        else:
            return []

        self.source[line_index] = ' '.join([left, new_center, right])

    def fix_e712(self, result):
        """Fix (trivial case of) comparison with boolean."""
        (line_index, offset, target) = get_index_offset_contents(result,
                                                                 self.source)

        # Handle very easy "not" special cases.
        if re.match(r'^\s*if [\w."\'\[\]]+ == False:$', target):
            self.source[line_index] = re.sub(r'if ([\w."\'\[\]]+) == False:',
                                             r'if not \1:', target, count=1)
        elif re.match(r'^\s*if [\w."\'\[\]]+ != True:$', target):
            self.source[line_index] = re.sub(r'if ([\w."\'\[\]]+) != True:',
                                             r'if not \1:', target, count=1)
        else:
            right_offset = offset + 2
            if right_offset >= len(target):
                return []

            left = target[:offset].rstrip()
            center = target[offset:right_offset]
            right = target[right_offset:].lstrip()

            # Handle simple cases only.
            new_right = None
            if center.strip() == '==':
                if re.match(r'\bTrue\b', right):
                    new_right = re.sub(r'\bTrue\b *', '', right, count=1)
            elif center.strip() == '!=':
                if re.match(r'\bFalse\b', right):
                    new_right = re.sub(r'\bFalse\b *', '', right, count=1)

            if new_right is None:
                return []

            if new_right[0].isalnum():
                new_right = ' ' + new_right

            self.source[line_index] = left + new_right

    def fix_e713(self, result):
        """Fix (trivial case of) non-membership check."""
        (line_index, offset, target) = get_index_offset_contents(result,
                                                                 self.source)

        # Temporarily convert 'not in' -> 'in' so only one pattern needs fixing.
        before_target = target[:offset]
        target = target[offset:]
        match_notin = COMPARE_NEGATIVE_REGEX_THROUGH.search(target)
        notin_pos_start, notin_pos_end = 0, 0
        if match_notin:
            notin_pos_start = match_notin.start(1)
            notin_pos_end = match_notin.end()
            target = '{}{} {}'.format(
                target[:notin_pos_start], 'in', target[notin_pos_end:])

        # fix 'not in'
        match = COMPARE_NEGATIVE_REGEX.search(target)
        if match:
            if match.group(3) == 'in':
                pos_start = match.start(1)
                new_target = '{5}{0}{1} {2} {3} {4}'.format(
                    target[:pos_start], match.group(2), match.group(1),
                    match.group(3), target[match.end():], before_target)
                if match_notin:
                    # revert 'in' -> 'not in'
                    pos_start = notin_pos_start + offset
                    pos_end = notin_pos_end + offset - 4  # len('not ')
                    new_target = '{}{} {}'.format(
                        new_target[:pos_start], 'not in', new_target[pos_end:])
                self.source[line_index] = new_target

    def fix_e714(self, result):
        """Fix object identity check that should use 'is not'."""
        (line_index, offset, target) = get_index_offset_contents(result,
                                                                 self.source)

        # Temporarily convert 'is not' -> 'is' so only one pattern needs fixing.
        before_target = target[:offset]
        target = target[offset:]
        match_isnot = COMPARE_NEGATIVE_REGEX_THROUGH.search(target)
        isnot_pos_start, isnot_pos_end = 0, 0
        if match_isnot:
            isnot_pos_start = match_isnot.start(1)
            isnot_pos_end = match_isnot.end()
            target = '{}{} {}'.format(
                target[:isnot_pos_start], 'in', target[isnot_pos_end:])

        match = COMPARE_NEGATIVE_REGEX.search(target)
        if match:
            if match.group(3).startswith('is'):
                pos_start = match.start(1)
                new_target = '{5}{0}{1} {2} {3} {4}'.format(
                    target[:pos_start], match.group(2), match.group(3),
                    match.group(1), target[match.end():], before_target)
                if match_isnot:
                    # revert 'is' -> 'is not'
                    pos_start = isnot_pos_start + offset
                    pos_end = isnot_pos_end + offset - 4  # len('not ')
                    new_target = '{}{} {}'.format(
                        new_target[:pos_start], 'is not', new_target[pos_end:])
                self.source[line_index] = new_target

    def fix_e722(self, result):
        """Fix bare except."""
        (line_index, _, target) = get_index_offset_contents(result,
                                                            self.source)
        match = BARE_EXCEPT_REGEX.search(target)
        if match:
            self.source[line_index] = '{}{}{}'.format(
                target[:result['column'] - 1], "except BaseException:",
                target[match.end():])

    def fix_e731(self, result):
        """Fix 'do not assign a lambda expression' check."""
        (line_index, _, target) = get_index_offset_contents(result,
                                                            self.source)
        match = LAMBDA_REGEX.search(target)
        if match:
            end = match.end()
            self.source[line_index] = '{}def {}({}): return {}'.format(
                target[:match.start(0)], match.group(1), match.group(2),
                target[end:].lstrip())

    def fix_w291(self, result):
        """Remove trailing whitespace."""
        fixed_line = self.source[result['line'] - 1].rstrip()
        self.source[result['line'] - 1] = fixed_line + '\n'

    def fix_w391(self, _):
        """Remove trailing blank lines."""
        blank_count = 0
        for line in reversed(self.source):
            line = line.rstrip()
            if line:
                break
            else:
                blank_count += 1

        original_length = len(self.source)
        self.source = self.source[:original_length - blank_count]
        return range(1, 1 + original_length)

    def fix_w503(self, result):
        """Fix W503 by moving the leading binary operator to the previous line."""
        (line_index, _, target) = get_index_offset_contents(result,
                                                            self.source)
        one_string_token = target.split()[0]
        try:
            ts = generate_tokens(one_string_token)
        except (SyntaxError, tokenize.TokenError):
            return
        if not _is_binary_operator(ts[0][0], one_string_token):
            return
        # find comment
        comment_index = 0
        found_not_comment_only_line = False
        comment_only_linenum = 0
        for i in range(5):
            # NOTE: try parsing the preceding code up to 5 times
            if (line_index - i) < 0:
                break
            from_index = line_index - i - 1
            if from_index < 0 or len(self.source) <= from_index:
                break
            to_index = line_index + 1
            strip_line = self.source[from_index].lstrip()
            if (
                not found_not_comment_only_line and
                strip_line and strip_line[0] == '#'
            ):
                comment_only_linenum += 1
                continue
            found_not_comment_only_line = True
            try:
                ts = generate_tokens("".join(self.source[from_index:to_index]))
            except (SyntaxError, tokenize.TokenError):
                continue
            newline_count = 0
            newline_index = []
            for index, t in enumerate(ts):
                if t[0] in (tokenize.NEWLINE, tokenize.NL):
                    newline_index.append(index)
                    newline_count += 1
            if newline_count > 2:
                tts = ts[newline_index[-3]:]
            else:
                tts = ts
            old = []
            for t in tts:
                if t[0] in (tokenize.NEWLINE, tokenize.NL):
                    newline_count -= 1
                    if newline_count <= 1:
                        break
                if tokenize.COMMENT == t[0] and old and old[0] != tokenize.NL:
                    comment_index = old[3][1]
                    break
                old = t
            break
        i = target.index(one_string_token)
        fix_target_line = line_index - 1 - comment_only_linenum
        self.source[line_index] = '{}{}'.format(
            target[:i], target[i + len(one_string_token):].lstrip())
        nl = find_newline(self.source[fix_target_line:line_index])
        before_line = self.source[fix_target_line]
        bl = before_line.index(nl)
        if comment_index:
            self.source[fix_target_line] = '{} {} {}'.format(
                before_line[:comment_index], one_string_token,
                before_line[comment_index + 1:])
        else:
            if before_line[:bl].endswith("#"):
                # special case
                # see: https://github.com/hhatto/autopep8/issues/503
                self.source[fix_target_line] = '{}{} {}'.format(
                    before_line[:bl-2], one_string_token, before_line[bl-2:])
            else:
                self.source[fix_target_line] = '{} {}{}'.format(
                    before_line[:bl], one_string_token, before_line[bl:])

    def fix_w504(self, result):
        """Fix W504 by moving the trailing binary operator to the next line."""
        (line_index, _, target) = get_index_offset_contents(result,
                                                            self.source)
        # NOTE: W504 is not reported by pycodestyle==2.4.0
        comment_index = 0
        operator_position = None  # (start_position, end_position)
        for i in range(1, 6):
            to_index = line_index + i
            try:
                ts = generate_tokens("".join(self.source[line_index:to_index]))
            except (SyntaxError, tokenize.TokenError):
                continue
            newline_count = 0
            newline_index = []
            for index, t in enumerate(ts):
                if _is_binary_operator(t[0], t[1]):
                    if t[2][0] == 1 and t[3][0] == 1:
                        operator_position = (t[2][1], t[3][1])
                elif t[0] == tokenize.NAME and t[1] in ("and", "or"):
                    if t[2][0] == 1 and t[3][0] == 1:
                        operator_position = (t[2][1], t[3][1])
                elif t[0] in (tokenize.NEWLINE, tokenize.NL):
                    newline_index.append(index)
                    newline_count += 1
            if newline_count > 2:
                tts = ts[:newline_index[-3]]
            else:
                tts = ts
            old = []
            for t in tts:
                if tokenize.COMMENT == t[0] and old:
                    comment_row, comment_index = old[3]
                    break
                old = t
            break
        if not operator_position:
            return
        target_operator = target[operator_position[0]:operator_position[1]]

        if comment_index and comment_row == 1:
            self.source[line_index] = '{}{}'.format(
                target[:operator_position[0]].rstrip(),
                target[comment_index:])
        else:
            self.source[line_index] = '{}{}{}'.format(
                target[:operator_position[0]].rstrip(),
                target[operator_position[1]:].lstrip(),
                target[operator_position[1]:])

        next_line = self.source[line_index + 1]
        next_line_indent = 0
        m = re.match(r'\s*', next_line)
        if m:
            next_line_indent = m.span()[1]
        self.source[line_index + 1] = '{}{} {}'.format(
            next_line[:next_line_indent], target_operator,
            next_line[next_line_indent:])

    def fix_w605(self, result):
        """Fix invalid escape sequence (W605) by doubling the backslash."""
        (line_index, offset, target) = get_index_offset_contents(result,
                                                                 self.source)
        self.source[line_index] = '{}\\{}'.format(
            target[:offset + 1], target[offset + 1:])


def get_module_imports_on_top_of_file(source, import_line_index):
    """Return import or from keyword position.

    example:
      > 0: import sys
        1: import os
        2:
        3: def function():
    """
    def is_string_literal(line):
        if line[0] in 'uUbB':
            line = line[1:]
        if line and line[0] in 'rR':
            line = line[1:]
        return line and (line[0] == '"' or line[0] == "'")

    def is_future_import(line):
        nodes = ast.parse(line)
        for n in nodes.body:
            if isinstance(n, ast.ImportFrom) and n.module == '__future__':
                return True
        return False

    def has_future_import(source):
        offset = 0
        line = ''
        for _, next_line in source:
            for line_part in next_line.strip().splitlines(True):
                line = line + line_part
                try:
                    return is_future_import(line), offset
                except SyntaxError:
                    continue
            offset += 1
        return False, offset

    allowed_try_keywords = ('try', 'except', 'else', 'finally')
    in_docstring = False
    docstring_kind = '"""'
    source_stream = iter(enumerate(source))
    for cnt, line in source_stream:
        if not in_docstring:
            m = DOCSTRING_START_REGEX.match(line.lstrip())
            if m is not None:
                in_docstring = True
                docstring_kind = m.group('kind')
                remain = line[m.end(): m.endpos].rstrip()
                if remain[-3:] == docstring_kind:  # one line doc
                    in_docstring = False
                continue
        if in_docstring:
            if line.rstrip()[-3:] == docstring_kind:
                in_docstring = False
            continue

        if not line.rstrip():
            continue
        elif line.startswith('#'):
            continue

        if line.startswith('import '):
            if cnt == import_line_index:
                continue
            return cnt
        elif line.startswith('from '):
            if cnt == import_line_index:
                continue
            hit, offset = has_future_import(
                itertools.chain([(cnt, line)], source_stream)
            )
            if hit:
                # move to just after the __future__ import block
                return cnt + offset + 1
            return cnt
        elif pycodestyle.DUNDER_REGEX.match(line):
            return cnt
        elif any(line.startswith(kw) for kw in allowed_try_keywords):
            continue
        elif is_string_literal(line):
            return cnt
        else:
            return cnt

    return 0


def get_index_offset_contents(result, source):
    """Return (line_index, column_offset, line_contents)."""
    line_index = result['line'] - 1
    return (line_index,
            result['column'] - 1,
            source[line_index])
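

# Illustrative sketch (editor's note, not part of upstream autopep8): a
# pycodestyle result uses 1-based positions, so for
# result = {'id': 'E711', 'line': 3, 'column': 5} and a ``source`` list of
# lines, get_index_offset_contents(result, source) returns
# (2, 4, source[2]) -- the zero-based line index, the zero-based column
# offset, and the raw line text.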


def get_fixed_long_line(target, previous_line, original,
                        indent_word='    ', max_line_length=79,
                        aggressive=False, experimental=False, verbose=False):
    """Break up long line and return result.

    Do this by generating multiple reformatted candidates and then
    ranking the candidates to heuristically select the best option.
    """
    indent = _get_indentation(target)
    source = target[len(indent):]
    assert source.lstrip() == source
    assert not target.lstrip().startswith('#')

    # Check for partial multiline.
    tokens = list(generate_tokens(source))

    candidates = shorten_line(
        tokens, source, indent,
        indent_word,
        max_line_length,
        aggressive=aggressive,
        experimental=experimental,
        previous_line=previous_line)

    # Also sort alphabetically as a tie breaker (for determinism).
    candidates = sorted(
        sorted(set(candidates).union([target, original])),
        key=lambda x: line_shortening_rank(
            x,
            indent_word,
            max_line_length,
            experimental=experimental))

    if verbose >= 4:
        print(('-' * 79 + '\n').join([''] + candidates + ['']),
              file=wrap_output(sys.stderr, 'utf-8'))

    if candidates:
        best_candidate = candidates[0]

        # Don't allow things to get longer.
        if longest_line_length(best_candidate) > longest_line_length(original):
            return None

        return best_candidate


def longest_line_length(code):
    """Return length of longest line."""
    if len(code) == 0:
        return 0
    return max(len(line) for line in code.splitlines())
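

# Illustrative sketch (editor's note, not part of upstream autopep8):
# longest_line_length('x = 1\ny = alpha + beta\n') returns 16, the length of
# the longest physical line ('y = alpha + beta'); an empty string returns 0.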


def join_logical_line(logical_line):
    """Return single line based on logical line input."""
    indentation = _get_indentation(logical_line)

    return indentation + untokenize_without_newlines(
        generate_tokens(logical_line.lstrip())) + '\n'


def untokenize_without_newlines(tokens):
    """Return source code based on tokens."""
    text = ''
    last_row = 0
    last_column = -1

    for t in tokens:
        token_string = t[1]
        (start_row, start_column) = t[2]
        (end_row, end_column) = t[3]

        if start_row > last_row:
            last_column = 0
        if (
            (start_column > last_column or token_string == '\n') and
            not text.endswith(' ')
        ):
            text += ' '

        if token_string != '\n':
            text += token_string

        last_row = end_row
        last_column = end_column

    return text.rstrip()


def _find_logical(source_lines):
    """Return (start, end) positions of each logical line in the source."""
    logical_start = []
    logical_end = []
    last_newline = True
    parens = 0
    for t in generate_tokens(''.join(source_lines)):
        if t[0] in [tokenize.COMMENT, tokenize.DEDENT,
                    tokenize.INDENT, tokenize.NL,
                    tokenize.ENDMARKER]:
            continue
        if not parens and t[0] in [tokenize.NEWLINE, tokenize.SEMI]:
            last_newline = True
            logical_end.append((t[3][0] - 1, t[2][1]))
            continue
        if last_newline and not parens:
            logical_start.append((t[2][0] - 1, t[2][1]))
            last_newline = False
        if t[0] == tokenize.OP:
            if t[1] in '([{':
                parens += 1
            elif t[1] in '}])':
                parens -= 1
    return (logical_start, logical_end)


def _get_logical(source_lines, result, logical_start, logical_end):
    """Return the logical line corresponding to the result.

    Assumes input is already E702-clean.
    """
    row = result['line'] - 1
    col = result['column'] - 1
    ls = None
    le = None
    for i in range(0, len(logical_start), 1):
        assert logical_end
        x = logical_end[i]
        if x[0] > row or (x[0] == row and x[1] > col):
            le = x
            ls = logical_start[i]
            break
    if ls is None:
        return None
    original = source_lines[ls[0]:le[0] + 1]
    return ls, le, original


def get_item(items, index, default=None):
    """Return items[index] if the index is in range, else default."""
    if 0 <= index < len(items):
        return items[index]

    return default
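

# Illustrative sketch (editor's note, not part of upstream autopep8):
# get_item(['a', 'b'], 1) returns 'b', while out-of-range lookups such as
# get_item(['a', 'b'], 5) or get_item(['a', 'b'], -1) fall back to the
# ``default`` value (None unless overridden).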


def reindent(source, indent_size):
    """Reindent all lines."""
    reindenter = Reindenter(source)
    return reindenter.run(indent_size)


def code_almost_equal(a, b):
    """Return True if code is similar.

    Ignore whitespace when comparing the individual lines.
    """
    split_a = split_and_strip_non_empty_lines(a)
    split_b = split_and_strip_non_empty_lines(b)

    if len(split_a) != len(split_b):
        return False

    for (index, _) in enumerate(split_a):
        if ''.join(split_a[index].split()) != ''.join(split_b[index].split()):
            return False

    return True
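

# Illustrative sketch (editor's note, not part of upstream autopep8):
# code_almost_equal('x = [1,\n     2]\n', 'x = [1,\n2]\n') is True because the
# two snippets differ only in whitespace; adding or removing a non-empty line
# would make it False.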


def split_and_strip_non_empty_lines(text):
    """Return lines split by newline.

    Ignore empty lines.
    """
    return [line.strip() for line in text.splitlines() if line.strip()]


def fix_e265(source, aggressive=False):  # pylint: disable=unused-argument
    """Format block comments."""
    if '#' not in source:
        # Optimization.
        return source

    ignored_line_numbers = multiline_string_lines(
        source,
        include_docstrings=True) | set(commented_out_code_lines(source))

    fixed_lines = []
    sio = io.StringIO(source)
    for (line_number, line) in enumerate(sio.readlines(), start=1):
        if (
            line.lstrip().startswith('#') and
            line_number not in ignored_line_numbers and
            not pycodestyle.noqa(line)
        ):
            indentation = _get_indentation(line)
            line = line.lstrip()

            # Normalize beginning if not a shebang.
            if len(line) > 1:
                pos = next((index for index, c in enumerate(line)
                            if c != '#'))
                if (
                    # Leave multiple spaces like '#    ' alone.
                    (line[:pos].count('#') > 1 or line[1].isalnum() or
                        not line[1].isspace()) and
                    line[1] not in ':!' and
                    # Leave stylistic outlined blocks alone.
                    not line.rstrip().endswith('#')
                ):
                    line = '# ' + line.lstrip('# \t')

            fixed_lines.append(indentation + line)
        else:
            fixed_lines.append(line)

    return ''.join(fixed_lines)


def refactor(source, fixer_names, ignore=None, filename=''):
    """Return refactored code using lib2to3.

    Skip if ignore string is produced in the refactored code.
    """
    not_found_end_of_file_newline = source and source.rstrip("\r\n") == source
    if not_found_end_of_file_newline:
        input_source = source + "\n"
    else:
        input_source = source

    from lib2to3 import pgen2
    try:
        new_text = refactor_with_2to3(input_source,
                                      fixer_names=fixer_names,
                                      filename=filename)
    except (pgen2.parse.ParseError,
            SyntaxError,
            UnicodeDecodeError,
            UnicodeEncodeError):
        return source

    if ignore:
        if ignore in new_text and ignore not in source:
            return source

    if not_found_end_of_file_newline:
        return new_text.rstrip("\r\n")

    return new_text


def code_to_2to3(select, ignore, where='', verbose=False):
    """Return the set of lib2to3 fixer names selected by the given codes."""
    fixes = set()
    for code, fix in CODE_TO_2TO3.items():
        if code_match(code, select=select, ignore=ignore):
            if verbose:
                print('---> Applying {} fix for {}'.format(where,
                                                           code.upper()),
                      file=sys.stderr)
            fixes |= set(fix)
    return fixes


def fix_2to3(source,
             aggressive=True, select=None, ignore=None, filename='',
             where='global', verbose=False):
    """Fix various deprecated code (via lib2to3)."""
    if not aggressive:
        return source

    select = select or []
    ignore = ignore or []

    return refactor(source,
                    code_to_2to3(select=select,
                                 ignore=ignore,
                                 where=where,
                                 verbose=verbose),
                    filename=filename)


def fix_w602(source, aggressive=True):
    """Fix deprecated form of raising exception."""
    if not aggressive:
        return source

    return refactor(source, ['raise'], ignore='with_traceback')


def find_newline(source):
    """Return type of newline used in source.

    Input is a list of lines.
    """
    assert not isinstance(source, unicode)

    counter = collections.defaultdict(int)
    for line in source:
        if line.endswith(CRLF):
            counter[CRLF] += 1
        elif line.endswith(CR):
            counter[CR] += 1
        elif line.endswith(LF):
            counter[LF] += 1

    return (sorted(counter, key=counter.get, reverse=True) or [LF])[0]
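

# Illustrative sketch (editor's note, not part of upstream autopep8): the most
# frequent line ending wins, so find_newline(['a\r\n', 'b\r\n', 'c\n']) would
# return CRLF ('\r\n'), and an input with no recognizable endings falls back
# to LF ('\n').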


def _get_indentword(source):
    """Return indentation type."""
    indent_word = '    '  # Default in case source has no indentation
    try:
        for t in generate_tokens(source):
            if t[0] == token.INDENT:
                indent_word = t[1]
                break
    except (SyntaxError, tokenize.TokenError):
        pass
    return indent_word


def _get_indentation(line):
    """Return leading whitespace."""
    if line.strip():
        non_whitespace_index = len(line) - len(line.lstrip())
        return line[:non_whitespace_index]

    return ''
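

# Illustrative sketch (editor's note, not part of upstream autopep8):
# _get_indentation('    return x\n') returns '    ' (the four leading
# spaces), while a blank or whitespace-only line yields ''.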


def get_diff_text(old, new, filename):
    """Return text of unified diff between old and new."""
    newline = '\n'
    diff = difflib.unified_diff(
        old, new,
        'original/' + filename,
        'fixed/' + filename,
        lineterm=newline)

    text = ''
    for line in diff:
        text += line

        # Work around missing newline (http://bugs.python.org/issue2142).
        if text and not line.endswith(newline):
            text += newline + r'\ No newline at end of file' + newline

    return text


def _priority_key(pep8_result):
    """Key for sorting PEP8 results.

    Global fixes should be done first. This is important for things like
    indentation.
    """
    priority = [
        # Fix multiline colon-based before semicolon based.
        'e701',
        # Break multiline statements early.
        'e702',
        # Things that make lines longer.
        'e225', 'e231',
        # Remove extraneous whitespace before breaking lines.
        'e201',
        # Shorten whitespace in comment before resorting to wrapping.
        'e262'
    ]
    middle_index = 10000
    lowest_priority = [
        # We need to shorten lines last since the logical fixer can get in a
        # loop, which causes us to exit early.
        'e501',
    ]
    key = pep8_result['id'].lower()
    try:
        return priority.index(key)
    except ValueError:
        try:
            return middle_index + lowest_priority.index(key) + 1
        except ValueError:
            return middle_index


def shorten_line(tokens, source, indentation, indent_word, max_line_length,
                 aggressive=False, experimental=False, previous_line=''):
    """Separate line at OPERATOR.

    Multiple candidates will be yielded.
    """
    for candidate in _shorten_line(tokens=tokens,
                                   source=source,
                                   indentation=indentation,
                                   indent_word=indent_word,
                                   aggressive=aggressive,
                                   previous_line=previous_line):
        yield candidate

    if aggressive:
        for key_token_strings in SHORTEN_OPERATOR_GROUPS:
            shortened = _shorten_line_at_tokens(
                tokens=tokens,
                source=source,
                indentation=indentation,
                indent_word=indent_word,
                key_token_strings=key_token_strings,
                aggressive=aggressive)

            if shortened is not None and shortened != source:
                yield shortened

    if experimental:
        for shortened in _shorten_line_at_tokens_new(
                tokens=tokens,
                source=source,
                indentation=indentation,
                max_line_length=max_line_length):
            yield shortened


def _shorten_line(tokens, source, indentation, indent_word,
                  aggressive=False, previous_line=''):
    """Separate line at OPERATOR.

    The input is expected to be free of newlines except for inside multiline
    strings and at the end.

    Multiple candidates will be yielded.
    """
    for (token_type,
         token_string,
         start_offset,
         end_offset) in token_offsets(tokens):

        if (
            token_type == tokenize.COMMENT and
            not is_probably_part_of_multiline(previous_line) and
            not is_probably_part_of_multiline(source) and
            not source[start_offset + 1:].strip().lower().startswith(
                ('noqa', 'pragma:', 'pylint:'))
        ):
            # Move inline comments to previous line.
            first = source[:start_offset]
            second = source[start_offset:]
            yield (indentation + second.strip() + '\n' +
                   indentation + first.strip() + '\n')
        elif token_type == token.OP and token_string != '=':
            # Don't break on '=' after keyword as this violates PEP 8.

            assert token_type != token.INDENT

            first = source[:end_offset]

            second_indent = indentation
            if (first.rstrip().endswith('(') and
                    source[end_offset:].lstrip().startswith(')')):
                pass
            elif first.rstrip().endswith('('):
                second_indent += indent_word
            elif '(' in first:
                second_indent += ' ' * (1 + first.find('('))
            else:
                second_indent += indent_word

            second = (second_indent + source[end_offset:].lstrip())
            if (
                not second.strip() or
                second.lstrip().startswith('#')
            ):
                continue

            # Do not begin a line with a comma.
            if second.lstrip().startswith(','):
                continue
            # Do not end a line with a dot.
            if first.rstrip().endswith('.'):
                continue
            if token_string in '+-*/':
                fixed = first + ' \\' + '\n' + second
            else:
                fixed = first + '\n' + second

            # Only fix if syntax is okay.
            if check_syntax(normalize_multiline(fixed)
                            if aggressive else fixed):
                yield indentation + fixed


def _is_binary_operator(token_type, text):
    return ((token_type == tokenize.OP or text in ['and', 'or']) and
            text not in '()[]{},:.;@=%~')


# A convenient way to handle tokens.
Token = collections.namedtuple('Token', ['token_type', 'token_string',
                                         'spos', 'epos', 'line'])


class ReformattedLines(object):

    """The reflowed lines of atoms.

    Each part of the line is represented as an "atom." They can be moved
    around when need be to get the optimal formatting.
    """

    ###########################################################################
    # Private Classes

    class _Indent(object):

        """Represent an indentation in the atom stream."""

        def __init__(self, indent_amt):
            self._indent_amt = indent_amt

        def emit(self):
            return ' ' * self._indent_amt

        @property
        def size(self):
            return self._indent_amt

    class _Space(object):

        """Represent a space in the atom stream."""

        def emit(self):
            return ' '

        @property
        def size(self):
            return 1

    class _LineBreak(object):

        """Represent a line break in the atom stream."""

        def emit(self):
            return '\n'

        @property
        def size(self):
            return 0

    def __init__(self, max_line_length):
        self._max_line_length = max_line_length
        self._lines = []
        self._bracket_depth = 0
        self._prev_item = None
        self._prev_prev_item = None

    def __repr__(self):
        return self.emit()

    ###########################################################################
    # Public Methods

    def add(self, obj, indent_amt, break_after_open_bracket):
        if isinstance(obj, Atom):
            self._add_item(obj, indent_amt)
            return

        self._add_container(obj, indent_amt, break_after_open_bracket)

    def add_comment(self, item):
        num_spaces = 2
        if len(self._lines) > 1:
            if isinstance(self._lines[-1], self._Space):
                num_spaces -= 1
            if len(self._lines) > 2:
                if isinstance(self._lines[-2], self._Space):
                    num_spaces -= 1

        while num_spaces > 0:
            self._lines.append(self._Space())
            num_spaces -= 1
        self._lines.append(item)

    def add_indent(self, indent_amt):
        self._lines.append(self._Indent(indent_amt))

    def add_line_break(self, indent):
        self._lines.append(self._LineBreak())
        self.add_indent(len(indent))

    def add_line_break_at(self, index, indent_amt):
        self._lines.insert(index, self._LineBreak())
        self._lines.insert(index + 1, self._Indent(indent_amt))

    def add_space_if_needed(self, curr_text, equal=False):
        if (
            not self._lines or isinstance(
                self._lines[-1], (self._LineBreak, self._Indent, self._Space))
        ):
            return

        prev_text = unicode(self._prev_item)
        prev_prev_text = (
            unicode(self._prev_prev_item) if self._prev_prev_item else '')

        if (
            # The previous item was a keyword or identifier and the current
            # item isn't an operator that doesn't require a space.
            ((self._prev_item.is_keyword or self._prev_item.is_string or
              self._prev_item.is_name or self._prev_item.is_number) and
             (curr_text[0] not in '([{.,:}])' or
              (curr_text[0] == '=' and equal))) or

            # Don't place spaces around a '.', unless it's in an 'import'
            # statement.
            ((prev_prev_text != 'from' and prev_text[-1] != '.' and
              curr_text != 'import') and

             # Don't place a space before a colon.
             curr_text[0] != ':' and

             # Don't split up ending brackets by spaces.
             ((prev_text[-1] in '}])' and curr_text[0] not in '.,}])') or

              # Put a space after a colon or comma.
              prev_text[-1] in ':,' or

              # Put space around '=' if asked to.
              (equal and prev_text == '=') or

              # Put spaces around non-unary arithmetic operators.
              ((self._prev_prev_item and
                (prev_text not in '+-' and
                 (self._prev_prev_item.is_name or
                  self._prev_prev_item.is_number or
                  self._prev_prev_item.is_string)) and
                prev_text in ('+', '-', '%', '*', '/', '//', '**', 'in')))))
        ):
            self._lines.append(self._Space())

    def previous_item(self):
        """Return the previous non-whitespace item."""
        return self._prev_item

    def fits_on_current_line(self, item_extent):
        return self.current_size() + item_extent <= self._max_line_length

    def current_size(self):
        """The size of the current line minus the indentation."""
        size = 0
        for item in reversed(self._lines):
            size += item.size
            if isinstance(item, self._LineBreak):
                break

        return size

    def line_empty(self):
        return (self._lines and
                isinstance(self._lines[-1],
                           (self._LineBreak, self._Indent)))

    def emit(self):
        string = ''
        for item in self._lines:
            if isinstance(item, self._LineBreak):
                string = string.rstrip()
            string += item.emit()

        return string.rstrip() + '\n'

    ###########################################################################
    # Private Methods

    def _add_item(self, item, indent_amt):
        """Add an item to the line.

        Reflow the line to get the best formatting after the item is
        inserted. The bracket depth indicates if the item is being
        inserted inside of a container or not.
        """
        if self._prev_item and self._prev_item.is_string and item.is_string:
            # Place consecutive string literals on separate lines.
            self._lines.append(self._LineBreak())
            self._lines.append(self._Indent(indent_amt))

        item_text = unicode(item)
        if self._lines and self._bracket_depth:
            # Adding the item into a container.
            self._prevent_default_initializer_splitting(item, indent_amt)

            if item_text in '.,)]}':
                self._split_after_delimiter(item, indent_amt)

        elif self._lines and not self.line_empty():
            # Adding the item outside of a container.
            if self.fits_on_current_line(len(item_text)):
                self._enforce_space(item)
            else:
                # Line break for the new item.
                self._lines.append(self._LineBreak())
                self._lines.append(self._Indent(indent_amt))

        self._lines.append(item)
        self._prev_item, self._prev_prev_item = item, self._prev_item

        if item_text in '([{':
            self._bracket_depth += 1
        elif item_text in '}])':
            self._bracket_depth -= 1
            assert self._bracket_depth >= 0

    def _add_container(self, container, indent_amt, break_after_open_bracket):
        actual_indent = indent_amt + 1

        if (
            unicode(self._prev_item) != '=' and
            not self.line_empty() and
            not self.fits_on_current_line(
                container.size + self._bracket_depth + 2)
        ):
            if unicode(container)[0] == '(' and self._prev_item.is_name:
                # Don't split before the opening bracket of a call.
                break_after_open_bracket = True
                actual_indent = indent_amt + 4
            elif (
                break_after_open_bracket or
                unicode(self._prev_item) not in '([{'
            ):
                # If the container doesn't fit on the current line and the
                # current line isn't empty, place the container on the next
                # line.
                self._lines.append(self._LineBreak())
                self._lines.append(self._Indent(indent_amt))
                break_after_open_bracket = False
        else:
            actual_indent = self.current_size() + 1
            break_after_open_bracket = False

        if isinstance(container, (ListComprehension, IfExpression)):
            actual_indent = indent_amt

        # Increase the continued indentation only if recursing on a
        # container.
        container.reflow(self, ' ' * actual_indent,
                         break_after_open_bracket=break_after_open_bracket)

    def _prevent_default_initializer_splitting(self, item, indent_amt):
        """Prevent splitting between a default initializer.

        When there is a default initializer, it's best to keep it all on
        the same line. It's nicer and more readable, even if it goes
        over the maximum allowable line length. This goes back along the
        current line to determine if we have a default initializer, and,
        if so, to remove extraneous whitespaces and add a line
        break/indent before it if needed.
        """
        if unicode(item) == '=':
            # This is the assignment in the initializer. Just remove spaces for
            # now.
            self._delete_whitespace()
            return

        if (not self._prev_item or not self._prev_prev_item or
                unicode(self._prev_item) != '='):
            return

        self._delete_whitespace()
        prev_prev_index = self._lines.index(self._prev_prev_item)

        if (
            isinstance(self._lines[prev_prev_index - 1], self._Indent) or
            self.fits_on_current_line(item.size + 1)
        ):
            # The default initializer is already the only item on this line.
            # Don't insert a newline here.
            return

        # Replace the space with a newline/indent combo.
        if isinstance(self._lines[prev_prev_index - 1], self._Space):
            del self._lines[prev_prev_index - 1]

        self.add_line_break_at(self._lines.index(self._prev_prev_item),
                               indent_amt)

    def _split_after_delimiter(self, item, indent_amt):
        """Split the line only after a delimiter."""
        self._delete_whitespace()

        if self.fits_on_current_line(item.size):
            return

        last_space = None
        for current_item in reversed(self._lines):
            if (
                last_space and
                (not isinstance(current_item, Atom) or
                 not current_item.is_colon)
            ):
                break
            else:
                last_space = None
                if isinstance(current_item, self._Space):
                    last_space = current_item
                if isinstance(current_item, (self._LineBreak, self._Indent)):
                    return

        if not last_space:
            return

        self.add_line_break_at(self._lines.index(last_space), indent_amt)

    def _enforce_space(self, item):
        """Enforce a space in certain situations.

        There are cases where we will want a space where normally we
        wouldn't put one. This just enforces the addition of a space.
        """
        if isinstance(self._lines[-1],
                      (self._Space, self._LineBreak, self._Indent)):
            return

        if not self._prev_item:
            return

        item_text = unicode(item)
        prev_text = unicode(self._prev_item)

        # Prefer a space around a '.' in an import statement, and between the
        # 'import' and '('.
        if (
            (item_text == '.' and prev_text == 'from') or
            (item_text == 'import' and prev_text == '.') or
            (item_text == '(' and prev_text == 'import')
        ):
            self._lines.append(self._Space())

    def _delete_whitespace(self):
        """Delete all whitespace from the end of the line."""
        while isinstance(self._lines[-1], (self._Space, self._LineBreak,
                                           self._Indent)):
            del self._lines[-1]


class Atom(object):

    """The smallest unbreakable unit that can be reflowed."""

    def __init__(self, atom):
        self._atom = atom

    def __repr__(self):
        return self._atom.token_string

    def __len__(self):
        return self.size

    def reflow(
        self, reflowed_lines, continued_indent, extent,
        break_after_open_bracket=False,
        is_list_comp_or_if_expr=False,
        next_is_dot=False
    ):
        if self._atom.token_type == tokenize.COMMENT:
            reflowed_lines.add_comment(self)
            return

        total_size = extent if extent else self.size

        if self._atom.token_string not in ',:([{}])':
            # Some atoms will need an extra 1-sized space token after them.
            total_size += 1

        prev_item = reflowed_lines.previous_item()
        if (
            not is_list_comp_or_if_expr and
            not reflowed_lines.fits_on_current_line(total_size) and
            not (next_is_dot and
                 reflowed_lines.fits_on_current_line(self.size + 1)) and
            not reflowed_lines.line_empty() and
            not self.is_colon and
            not (prev_item and prev_item.is_name and
                 unicode(self) == '(')
        ):
            # Start a new line if there is already something on the line and
            # adding this atom would make it go over the max line length.
            reflowed_lines.add_line_break(continued_indent)
        else:
            reflowed_lines.add_space_if_needed(unicode(self))

        reflowed_lines.add(self, len(continued_indent),
                           break_after_open_bracket)

    def emit(self):
        return self.__repr__()

    @property
    def is_keyword(self):
        return keyword.iskeyword(self._atom.token_string)

    @property
    def is_string(self):
        return self._atom.token_type == tokenize.STRING

    @property
    def is_name(self):
        return self._atom.token_type == tokenize.NAME

    @property
    def is_number(self):
        return self._atom.token_type == tokenize.NUMBER

    @property
    def is_comma(self):
        return self._atom.token_string == ','

    @property
    def is_colon(self):
        return self._atom.token_string == ':'

    @property
    def size(self):
        return len(self._atom.token_string)


class Container(object):

    """Base class for all container types."""

    def __init__(self, items):
        self._items = items

    def __repr__(self):
        string = ''
        last_was_keyword = False

        for item in self._items:
            if item.is_comma:
                string += ', '
            elif item.is_colon:
                string += ': '
            else:
                item_string = unicode(item)
                if (
                    string and
                    (last_was_keyword or
                     (not string.endswith(tuple('([{,.:}]) ')) and
                      not item_string.startswith(tuple('([{,.:}])'))))
                ):
                    string += ' '
                string += item_string

            last_was_keyword = item.is_keyword
        return string

    def __iter__(self):
        for element in self._items:
            yield element

    def __getitem__(self, idx):
        return self._items[idx]

    def reflow(self, reflowed_lines, continued_indent,
               break_after_open_bracket=False):
        last_was_container = False
        for (index, item) in enumerate(self._items):
            next_item = get_item(self._items, index + 1)

            if isinstance(item, Atom):
                is_list_comp_or_if_expr = (
                    isinstance(self, (ListComprehension, IfExpression)))
                item.reflow(reflowed_lines, continued_indent,
                            self._get_extent(index),
                            is_list_comp_or_if_expr=is_list_comp_or_if_expr,
                            next_is_dot=(next_item and
                                         unicode(next_item) == '.'))
                if last_was_container and item.is_comma:
                    reflowed_lines.add_line_break(continued_indent)
                last_was_container = False
            else:  # isinstance(item, Container)
                reflowed_lines.add(item, len(continued_indent),
                                   break_after_open_bracket)
                last_was_container = not isinstance(item, (ListComprehension,
                                                           IfExpression))

            if (
                break_after_open_bracket and index == 0 and
                # Prefer to keep empty containers together instead of
                # separating them.
                unicode(item) == self.open_bracket and
                (not next_item or unicode(next_item) != self.close_bracket) and
                (len(self._items) != 3 or not isinstance(next_item, Atom))
            ):
                reflowed_lines.add_line_break(continued_indent)
                break_after_open_bracket = False
            else:
                next_next_item = get_item(self._items, index + 2)
                if (
                    unicode(item) not in ['.', '%', 'in'] and
                    next_item and not isinstance(next_item, Container) and
                    unicode(next_item) != ':' and
                    next_next_item and (not isinstance(next_next_item, Atom) or
                                        unicode(next_item) == 'not') and
                    not reflowed_lines.line_empty() and
                    not reflowed_lines.fits_on_current_line(
                        self._get_extent(index + 1) + 2)
                ):
                    reflowed_lines.add_line_break(continued_indent)

    def _get_extent(self, index):
        """The extent of the full element.

        E.g., the length of a function call or keyword.
        """
        extent = 0
        prev_item = get_item(self._items, index - 1)
        seen_dot = prev_item and unicode(prev_item) == '.'
        while index < len(self._items):
            item = get_item(self._items, index)
            index += 1

            if isinstance(item, (ListComprehension, IfExpression)):
                break

            if isinstance(item, Container):
                if prev_item and prev_item.is_name:
                    if seen_dot:
                        extent += 1
                    else:
                        extent += item.size

                    prev_item = item
                    continue
            elif (unicode(item) not in ['.', '=', ':', 'not'] and
                  not item.is_name and not item.is_string):
                break

            if unicode(item) == '.':
                seen_dot = True

            extent += item.size
            prev_item = item

        return extent

    @property
    def is_string(self):
        return False

    @property
    def size(self):
        return len(self.__repr__())

    @property
    def is_keyword(self):
        return False

    @property
    def is_name(self):
        return False

    @property
    def is_comma(self):
        return False

    @property
    def is_colon(self):
        return False

    @property
    def open_bracket(self):
        return None

    @property
    def close_bracket(self):
        return None


class Tuple(Container):

    """A high-level representation of a tuple."""

    @property
    def open_bracket(self):
        return '('

    @property
    def close_bracket(self):
        return ')'


class List(Container):

    """A high-level representation of a list."""

    @property
    def open_bracket(self):
        return '['

    @property
    def close_bracket(self):
        return ']'


class DictOrSet(Container):

    """A high-level representation of a dictionary or set."""

    @property
    def open_bracket(self):
        return '{'

    @property
    def close_bracket(self):
        return '}'


class ListComprehension(Container):

    """A high-level representation of a list comprehension."""

    @property
    def size(self):
        length = 0
        for item in self._items:
            if isinstance(item, IfExpression):
                break
            length += item.size
        return length


class IfExpression(Container):

    """A high-level representation of an if-expression."""


def _parse_container(tokens, index, for_or_if=None):
    """Parse a high-level container, such as a list, tuple, etc."""

    # Store the opening bracket.
    items = [Atom(Token(*tokens[index]))]
    index += 1

    num_tokens = len(tokens)
    while index < num_tokens:
        tok = Token(*tokens[index])

        if tok.token_string in ',)]}':
            # First check if we're at the end of a list comprehension or
            # if-expression. Don't add the ending token as part of the list
            # comprehension or if-expression, because they aren't part of those
            # constructs.
            if for_or_if == 'for':
                return (ListComprehension(items), index - 1)
            elif for_or_if == 'if':
                return (IfExpression(items), index - 1)

            # We've reached the end of a container.
            items.append(Atom(tok))

            # If not, then we are at the end of a container.
            if tok.token_string == ')':
                # The end of a tuple.
                return (Tuple(items), index)
            elif tok.token_string == ']':
                # The end of a list.
                return (List(items), index)
            elif tok.token_string == '}':
                # The end of a dictionary or set.
                return (DictOrSet(items), index)

        elif tok.token_string in '([{':
            # A sub-container is being defined.
            (container, index) = _parse_container(tokens, index)
            items.append(container)

        elif tok.token_string == 'for':
            (container, index) = _parse_container(tokens, index, 'for')
            items.append(container)

        elif tok.token_string == 'if':
            (container, index) = _parse_container(tokens, index, 'if')
            items.append(container)

        else:
            items.append(Atom(tok))

        index += 1

    return (None, None)


def _parse_tokens(tokens):
    """Parse the tokens.

    This converts the tokens into a form where we can manipulate them
    more easily.
    """
    index = 0
    parsed_tokens = []

    num_tokens = len(tokens)
    while index < num_tokens:
        tok = Token(*tokens[index])

        assert tok.token_type != token.INDENT
        if tok.token_type == tokenize.NEWLINE:
            # There's only one newline and it's at the end.
            break

        if tok.token_string in '([{':
            (container, index) = _parse_container(tokens, index)
            if not container:
                return None
            parsed_tokens.append(container)
        else:
            parsed_tokens.append(Atom(tok))

        index += 1

    return parsed_tokens


def _reflow_lines(parsed_tokens, indentation, max_line_length,
                  start_on_prefix_line):
    """Reflow the lines so that it looks nice."""
    if unicode(parsed_tokens[0]) == 'def':
        # A function definition gets indented a bit more.
        continued_indent = indentation + ' ' * 2 * DEFAULT_INDENT_SIZE
    else:
        continued_indent = indentation + ' ' * DEFAULT_INDENT_SIZE

    break_after_open_bracket = not start_on_prefix_line

    lines = ReformattedLines(max_line_length)
    lines.add_indent(len(indentation.lstrip('\r\n')))

    if not start_on_prefix_line:
        # If splitting after the opening bracket will cause the first element
        # to be aligned weirdly, don't try it.
        first_token = get_item(parsed_tokens, 0)
        second_token = get_item(parsed_tokens, 1)

        if (
            first_token and second_token and
            unicode(second_token)[0] == '(' and
            len(indentation) + len(first_token) + 1 == len(continued_indent)
        ):
            return None

    for item in parsed_tokens:
        lines.add_space_if_needed(unicode(item), equal=True)

        save_continued_indent = continued_indent
        if start_on_prefix_line and isinstance(item, Container):
            start_on_prefix_line = False
            continued_indent = ' ' * (lines.current_size() + 1)

        item.reflow(lines, continued_indent, break_after_open_bracket)
        continued_indent = save_continued_indent

    return lines.emit()


def _shorten_line_at_tokens_new(tokens, source, indentation,
                                max_line_length):
    """Shorten the line taking its length into account.

    The input is expected to be free of newlines except for inside
    multiline strings and at the end.
    """
    # Yield the original source so we can see if it's a better choice than the
    # shortened candidate lines we generate here.
    yield indentation + source

    parsed_tokens = _parse_tokens(tokens)

    if parsed_tokens:
        # Perform two reflows. The first one starts on the same line as the
        # prefix. The second starts on the line after the prefix.
        fixed = _reflow_lines(parsed_tokens, indentation, max_line_length,
                              start_on_prefix_line=True)
        if fixed and check_syntax(normalize_multiline(fixed.lstrip())):
            yield fixed

        fixed = _reflow_lines(parsed_tokens, indentation, max_line_length,
                              start_on_prefix_line=False)
        if fixed and check_syntax(normalize_multiline(fixed.lstrip())):
            yield fixed


def _shorten_line_at_tokens(tokens, source, indentation, indent_word,
                            key_token_strings, aggressive):
    """Separate line by breaking at tokens in key_token_strings.

    The input is expected to be free of newlines except for inside
    multiline strings and at the end.
    """
    offsets = []
    for (index, _t) in enumerate(token_offsets(tokens)):
        (token_type,
         token_string,
         start_offset,
         end_offset) = _t

        assert token_type != token.INDENT

        if token_string in key_token_strings:
            # Do not break in containers with zero or one items.
            unwanted_next_token = {
                '(': ')',
                '[': ']',
                '{': '}'}.get(token_string)
            if unwanted_next_token:
                if (
                    get_item(tokens,
                             index + 1,
                             default=[None, None])[1] == unwanted_next_token or
                    get_item(tokens,
                             index + 2,
                             default=[None, None])[1] == unwanted_next_token
                ):
                    continue

            if (
                index > 2 and token_string == '(' and
                tokens[index - 1][1] in ',(%['
            ):
                # Don't split after a tuple start, or before a tuple start if
                # the tuple is in a list.
                continue

            if end_offset < len(source) - 1:
                # Don't split right before newline.
                offsets.append(end_offset)
        else:
            # Break at adjacent strings. These were probably meant to be on
            # separate lines in the first place.
            previous_token = get_item(tokens, index - 1)
            if (
                token_type == tokenize.STRING and
                previous_token and previous_token[0] == tokenize.STRING
            ):
                offsets.append(start_offset)

    current_indent = None
    fixed = None
    for line in split_at_offsets(source, offsets):
        if fixed:
            fixed += '\n' + current_indent + line

            for symbol in '([{':
                if line.endswith(symbol):
                    current_indent += indent_word
        else:
            # First line.
            fixed = line
            assert not current_indent
            current_indent = indent_word

    assert fixed is not None

    if check_syntax(normalize_multiline(fixed)
                    if aggressive > 1 else fixed):
        return indentation + fixed

    return None


def token_offsets(tokens):
    """Yield tokens and offsets."""
    end_offset = 0
    previous_end_row = 0
    previous_end_column = 0
    for t in tokens:
        token_type = t[0]
        token_string = t[1]
        (start_row, start_column) = t[2]
        (end_row, end_column) = t[3]

        # Account for the whitespace between tokens.
        end_offset += start_column
        if previous_end_row == start_row:
            end_offset -= previous_end_column

        # Record the start offset of the token.
        start_offset = end_offset

        # Account for the length of the token itself.
        end_offset += len(token_string)

        yield (token_type,
               token_string,
               start_offset,
               end_offset)

        previous_end_row = end_row
        previous_end_column = end_column


def normalize_multiline(line):
    """Normalize multiline-related code that will cause syntax error.

    This is for purposes of checking syntax.
    """
    if line.startswith('def ') and line.rstrip().endswith(':'):
        return line + ' pass'
    elif line.startswith('return '):
        return 'def _(): ' + line
    elif line.startswith('@'):
        return line + 'def _(): pass'
    elif line.startswith('class '):
        return line + ' pass'
    elif line.startswith(('if ', 'elif ', 'for ', 'while ')):
        return line + ' pass'

    return line
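

# Illustrative sketch (editor's note, not part of upstream autopep8):
# normalize_multiline('def foo(x):') returns 'def foo(x): pass' and
# normalize_multiline('return x') returns 'def _(): return x', both of which
# compile on their own, so the syntax check does not reject an otherwise
# valid partial line.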


def fix_whitespace(line, offset, replacement):
    """Replace whitespace at offset and return fixed line."""
    # Replace escaped newlines too
    left = line[:offset].rstrip('\n\r \t\\')
    right = line[offset:].lstrip('\n\r \t\\')
    if right.startswith('#'):
        return line
    return left + replacement + right
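

# Illustrative sketch (editor's note, not part of upstream autopep8):
# fix_whitespace('x  = 1', offset=1, replacement=' ') collapses the run of
# spaces around offset 1 and returns 'x = 1'; if the text to the right starts
# with '#', the line is returned unchanged so comments keep their spacing.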


def _execute_pep8(pep8_options, source):
    """Execute pycodestyle via python method calls."""
    class QuietReport(pycodestyle.BaseReport):

        """Version of checker that does not print."""

        def __init__(self, options):
            super(QuietReport, self).__init__(options)
            self.__full_error_results = []

        def error(self, line_number, offset, text, check):
            """Collect errors."""
            code = super(QuietReport, self).error(line_number,
                                                  offset,
                                                  text,
                                                  check)
            if code:
                self.__full_error_results.append(
                    {'id': code,
                     'line': line_number,
                     'column': offset + 1,
                     'info': text})

        def full_error_results(self):
            """Return error results in detail.

            Results are in the form of a list of dictionaries. Each
            dictionary contains 'id', 'line', 'column', and 'info'.
            """
            return self.__full_error_results

    checker = pycodestyle.Checker('', lines=source, reporter=QuietReport,
                                  **pep8_options)
    checker.check_all()
    return checker.report.full_error_results()


def _remove_leading_and_normalize(line, with_rstrip=True):
    # ignore FF in first lstrip()
    if with_rstrip:
        return line.lstrip(' \t\v').rstrip(CR + LF) + '\n'
    return line.lstrip(' \t\v')


class Reindenter(object):

    """Reindents badly-indented code to uniformly use four-space indentation.

    Released to the public domain, by Tim Peters, 03 October 2000.

    """

    def __init__(self, input_text):
        sio = io.StringIO(input_text)
        source_lines = sio.readlines()

        self.string_content_line_numbers = multiline_string_lines(input_text)

        # File lines, rstripped & tab-expanded. Dummy at start is so
        # that we can use tokenize's 1-based line numbering easily.
        # Note that a line is all-blank iff it is a newline.
        self.lines = []
        for line_number, line in enumerate(source_lines, start=1):
            # Do not modify if inside a multiline string.
            if line_number in self.string_content_line_numbers:
                self.lines.append(line)
            else:
                # Only expand leading tabs.
                with_rstrip = line_number != len(source_lines)
                self.lines.append(
                    _get_indentation(line).expandtabs() +
                    _remove_leading_and_normalize(line, with_rstrip)
                )

        self.lines.insert(0, None)
        self.index = 1  # index into self.lines of next line
        self.input_text = input_text

    def run(self, indent_size=DEFAULT_INDENT_SIZE):
        """Fix indentation and return the re-indented source text.

        Line numbers are indexed at 1.

        """
        if indent_size < 1:
            return self.input_text

        try:
            stats = _reindent_stats(tokenize.generate_tokens(self.getline))
        except (SyntaxError, tokenize.TokenError):
            return self.input_text

        # Remove trailing empty lines.
        lines = self.lines

        # Sentinel.
        stats.append((len(lines), 0))

        # Map count of leading spaces to # we want.
        have2want = {}

        # Program after transformation.
        after = []

        # Copy over initial empty lines -- there's nothing to do until
        # we see a line with *something* on it.
        i = stats[0][0]
        after.extend(lines[1:i])
        for i in range(len(stats) - 1):
            thisstmt, thislevel = stats[i]
            nextstmt = stats[i + 1][0]

            have = _leading_space_count(lines[thisstmt])
            want = thislevel * indent_size
            if want < 0:
                # A comment line.
                if have:
                    # An indented comment line. If we saw the same
                    # indentation before, reuse what it most recently
                    # mapped to.
                    want = have2want.get(have, -1)
                    if want < 0:
                        # Then it probably belongs to the next real stmt.
                        for j in range(i + 1, len(stats) - 1):
                            jline, jlevel = stats[j]
                            if jlevel >= 0:
                                if have == _leading_space_count(lines[jline]):
                                    want = jlevel * indent_size
                                break
                    # Maybe it's a hanging comment like this one,
                    if want < 0:
                        # in which case we should shift it like its base
                        # line got shifted.
                        for j in range(i - 1, -1, -1):
                            jline, jlevel = stats[j]
                            if jlevel >= 0:
                                want = (have + _leading_space_count(
                                    after[jline - 1]) -
                                    _leading_space_count(lines[jline]))
                                break
                    if want < 0:
                        # Still no luck -- leave it alone.
                        want = have
                else:
                    want = 0
            assert want >= 0
            have2want[have] = want
            diff = want - have
            if diff == 0 or have == 0:
                after.extend(lines[thisstmt:nextstmt])
            else:
                for line_number, line in enumerate(lines[thisstmt:nextstmt],
                                                   start=thisstmt):
                    if line_number in self.string_content_line_numbers:
                        after.append(line)
                    elif diff > 0:
                        if line == '\n':
                            after.append(line)
                        else:
                            after.append(' ' * diff + line)
                    else:
                        remove = min(_leading_space_count(line), -diff)
                        after.append(line[remove:])

        return ''.join(after)

    def getline(self):
        """Line-getter for tokenize."""
        if self.index >= len(self.lines):
            line = ''
        else:
            line = self.lines[self.index]
            self.index += 1
        return line
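
# Illustrative usage (editor's note; not part of the original source):
#   Reindenter('if True:\n  x = 1\n').run()
# should yield 'if True:\n    x = 1\n', i.e. the body is re-indented to
# DEFAULT_INDENT_SIZE spaces while multiline-string contents are left alone.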


def _reindent_stats(tokens):
    """Return list of (lineno, indentlevel) pairs.

    One for each stmt and comment line. indentlevel is -1 for comment
    lines, as a signal that tokenize doesn't know what to do about them;
    indeed, they're our headache!

    """
    find_stmt = 1  # Next token begins a fresh stmt?
    level = 0  # Current indent level.
    stats = []

    for t in tokens:
        token_type = t[0]
        sline = t[2][0]
        line = t[4]

        if token_type == tokenize.NEWLINE:
            # A program statement, or ENDMARKER, will eventually follow,
            # after some (possibly empty) run of tokens of the form
            #     (NL | COMMENT)* (INDENT | DEDENT+)?
            find_stmt = 1
        elif token_type == tokenize.INDENT:
            find_stmt = 1
            level += 1
        elif token_type == tokenize.DEDENT:
            find_stmt = 1
            level -= 1
        elif token_type == tokenize.COMMENT:
            if find_stmt:
                stats.append((sline, -1))
                # But we're still looking for a new stmt, so leave
                # find_stmt alone.
        elif token_type == tokenize.NL:
            pass
        elif find_stmt:
            # This is the first "real token" following a NEWLINE, so it
            # must be the first token of the next program statement, or an
            # ENDMARKER.
            find_stmt = 0
            if line:   # Not endmarker.
                stats.append((sline, level))

    return stats


def _leading_space_count(line):
    """Return number of leading spaces in line."""
    i = 0
    while i < len(line) and line[i] == ' ':
        i += 1
    return i


def refactor_with_2to3(source_text, fixer_names, filename=''):
    """Use lib2to3 to refactor the source.

    Return the refactored source code.

    """
    from lib2to3.refactor import RefactoringTool
    fixers = ['lib2to3.fixes.fix_' + name for name in fixer_names]
    tool = RefactoringTool(fixer_names=fixers, explicit=fixers)

    from lib2to3.pgen2 import tokenize as lib2to3_tokenize
    try:
        # The name parameter is necessary particularly for the "import" fixer.
        return unicode(tool.refactor_string(source_text, name=filename))
    except lib2to3_tokenize.TokenError:
        return source_text


def check_syntax(code):
    """Return True if syntax is okay."""
    try:
        return compile(code, '<string>', 'exec', dont_inherit=True)
    except (SyntaxError, TypeError, ValueError):
        return False


def find_with_line_numbers(pattern, contents):
    """A wrapper around 're.finditer' to find line numbers.

    Returns a list of line numbers where pattern was found in contents.

    """
    matches = list(re.finditer(pattern, contents))
    if not matches:
        return []

    end = matches[-1].start()

    # -1 so a failed `rfind` maps to the first line.
    newline_offsets = {
        -1: 0
    }
    for line_num, m in enumerate(re.finditer(r'\n', contents), 1):
        offset = m.start()
        if offset > end:
            break
        newline_offsets[offset] = line_num

    def get_line_num(match, contents):
        """Get the line number of a match in a file's contents.

        Failing to find the newline is OK; -1 maps to 0.

        """
        newline_offset = contents.rfind('\n', 0, match.start())
        return newline_offsets[newline_offset]

    return [get_line_num(match, contents) + 1 for match in matches]
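
# Illustrative example (editor's note; not part of the original source):
#   find_with_line_numbers(r'def\b', 'x = 1\ndef foo():\n    pass\n') -> [2]
# i.e. one 1-indexed line number per regex match.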


def get_disabled_ranges(source):
    """Returns a list of tuples representing the disabled ranges.

    If a disable comment is never re-enabled, the range extends to the
    end of the file.

    """
    enable_line_nums = find_with_line_numbers(ENABLE_REGEX, source)
    disable_line_nums = find_with_line_numbers(DISABLE_REGEX, source)
    total_lines = len(re.findall("\n", source)) + 1

    enable_commands = {}
    for num in enable_line_nums:
        enable_commands[num] = True
    for num in disable_line_nums:
        enable_commands[num] = False

    disabled_ranges = []
    currently_enabled = True
    disabled_start = None

    for line, commanded_enabled in sorted(enable_commands.items()):
        if commanded_enabled is False and currently_enabled is True:
            disabled_start = line
            currently_enabled = False
        elif commanded_enabled is True and currently_enabled is False:
            disabled_ranges.append((disabled_start, line))
            currently_enabled = True

    if currently_enabled is False:
        disabled_ranges.append((disabled_start, total_lines))

    return disabled_ranges
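
# Illustrative example (editor's note; not part of the original source):
# if DISABLE_REGEX matches on line 3 and ENABLE_REGEX matches on line 7,
# get_disabled_ranges() returns [(3, 7)]; with no later re-enable match the
# range would instead run to the last line of the source.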


def filter_disabled_results(result, disabled_ranges):
    """Filter out reports based on tuple of disabled ranges."""
    line = result['line']
    for disabled_range in disabled_ranges:
        if disabled_range[0] <= line <= disabled_range[1]:
            return False
    return True


def filter_results(source, results, aggressive):
    """Filter out spurious reports from pycodestyle.

    If aggressive is True, we allow possibly unsafe fixes (E711, E712).

    """
    non_docstring_string_line_numbers = multiline_string_lines(
        source, include_docstrings=False)
    all_string_line_numbers = multiline_string_lines(
        source, include_docstrings=True)

    commented_out_code_line_numbers = commented_out_code_lines(source)

    # Filter out the disabled ranges
    disabled_ranges = get_disabled_ranges(source)
    if disabled_ranges:
        results = [
            result for result in results if filter_disabled_results(
                result,
                disabled_ranges,
            )
        ]

    has_e901 = any(result['id'].lower() == 'e901' for result in results)

    for r in results:
        issue_id = r['id'].lower()

        if r['line'] in non_docstring_string_line_numbers:
            if issue_id.startswith(('e1', 'e501', 'w191')):
                continue

        if r['line'] in all_string_line_numbers:
            if issue_id in ['e501']:
                continue

        # We must offset by 1 for lines that contain the trailing contents of
        # multiline strings.
        if not aggressive and (r['line'] + 1) in all_string_line_numbers:
            # Do not modify multiline strings in non-aggressive mode. Removing
            # trailing whitespace could break doctests.
            if issue_id.startswith(('w29', 'w39')):
                continue

        if aggressive <= 0:
            if issue_id.startswith(('e711', 'e72', 'w6')):
                continue

        if aggressive <= 1:
            if issue_id.startswith(('e712', 'e713', 'e714')):
                continue

        if aggressive <= 2:
            if issue_id.startswith('e704'):
                continue

        if r['line'] in commented_out_code_line_numbers:
            if issue_id.startswith(('e26', 'e501')):
                continue

        # Do not touch indentation if there is a token error caused by
        # incomplete multi-line statement. Otherwise, we risk screwing up the
        # indentation.
        if has_e901:
            if issue_id.startswith(('e1', 'e7')):
                continue

        yield r


def multiline_string_lines(source, include_docstrings=False):
    """Return line numbers that are within multiline strings.

    The line numbers are indexed at 1.

    Docstrings are ignored.

    """
    line_numbers = set()
    previous_token_type = ''
    try:
        for t in generate_tokens(source):
            token_type = t[0]
            start_row = t[2][0]
            end_row = t[3][0]

            if token_type == tokenize.STRING and start_row != end_row:
                if (
                    include_docstrings or
                    previous_token_type != tokenize.INDENT
                ):
                    # We increment by one since we want the contents of the
                    # string.
                    line_numbers |= set(range(1 + start_row, 1 + end_row))

            previous_token_type = token_type
    except (SyntaxError, tokenize.TokenError):
        pass

    return line_numbers


def commented_out_code_lines(source):
    """Return line numbers of comments that are likely code.

    Commented-out code is bad practice, but modifying it just adds even
    more clutter.

    """
    line_numbers = []
    try:
        for t in generate_tokens(source):
            token_type = t[0]
            token_string = t[1]
            start_row = t[2][0]
            line = t[4]

            # Ignore inline comments.
            if not line.lstrip().startswith('#'):
                continue

            if token_type == tokenize.COMMENT:
                stripped_line = token_string.lstrip('#').strip()
                with warnings.catch_warnings():
                    # ignore SyntaxWarning in Python3.8+
                    # refs:
                    #   https://bugs.python.org/issue15248
                    #   https://docs.python.org/3.8/whatsnew/3.8.html#other-language-changes
                    warnings.filterwarnings("ignore", category=SyntaxWarning)
                    if (
                        ' ' in stripped_line and
                        '#' not in stripped_line and
                        check_syntax(stripped_line)
                    ):
                        line_numbers.append(start_row)
    except (SyntaxError, tokenize.TokenError):
        pass

    return line_numbers


def shorten_comment(line, max_line_length, last_comment=False):
    """Return trimmed or split long comment line.

    If there are no comments immediately following it, do a text wrap.
    Doing this wrapping on all comments in general would lead to jagged
    comment text.

    """
    assert len(line) > max_line_length
    line = line.rstrip()

    # PEP 8 recommends 72 characters for comment text.
    indentation = _get_indentation(line) + '# '
    max_line_length = min(max_line_length,
                          len(indentation) + 72)

    MIN_CHARACTER_REPEAT = 5
    if (
        len(line) - len(line.rstrip(line[-1])) >= MIN_CHARACTER_REPEAT and
        not line[-1].isalnum()
    ):
        # Trim comments that end with things like ---------
        return line[:max_line_length] + '\n'
    elif last_comment and re.match(r'\s*#+\s*\w+', line):
        split_lines = textwrap.wrap(line.lstrip(' \t#'),
                                    initial_indent=indentation,
                                    subsequent_indent=indentation,
                                    width=max_line_length,
                                    break_long_words=False,
                                    break_on_hyphens=False)
        return '\n'.join(split_lines) + '\n'
    return line + '\n'


def normalize_line_endings(lines, newline):
    """Return fixed line endings.

    All lines will be modified to use the most common line ending.

    """
    line = [line.rstrip('\n\r') + newline for line in lines]
    if line and lines[-1] == lines[-1].rstrip('\n\r'):
        line[-1] = line[-1].rstrip('\n\r')
    return line


def mutual_startswith(a, b):
    return b.startswith(a) or a.startswith(b)


def code_match(code, select, ignore):
    if ignore:
        assert not isinstance(ignore, unicode)
        for ignored_code in [c.strip() for c in ignore]:
            if mutual_startswith(code.lower(), ignored_code.lower()):
                return False

    if select:
        assert not isinstance(select, unicode)
        for selected_code in [c.strip() for c in select]:
            if mutual_startswith(code.lower(), selected_code.lower()):
                return True
        return False

    return True
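
# Illustrative examples (editor's note; not part of the original source):
#   code_match('E501', select=['E5'], ignore=[])   -> True   (prefix match)
#   code_match('E501', select=[], ignore=['E501']) -> False  (ignored)
#   code_match('E501', select=['W6'], ignore=[])   -> False  (not selected)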


def fix_code(source, options=None, encoding=None, apply_config=False):
    """Return fixed source code.

    "encoding" will be used to decode "source" if it is a byte string.

    """
    options = _get_options(options, apply_config)

    if not isinstance(source, unicode):
        source = source.decode(encoding or get_encoding())

    sio = io.StringIO(source)
    return fix_lines(sio.readlines(), options=options)
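
# Illustrative usage (editor's note; not part of the original source):
#   import autopep8
#   autopep8.fix_code('x=1\n')
# should return 'x = 1\n' with the default options; pass, for example,
# options={'aggressive': 1} to enable the more intrusive fixes.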


def _get_options(raw_options, apply_config):
    """Return parsed options."""
    if not raw_options:
        return parse_args([''], apply_config=apply_config)

    if isinstance(raw_options, dict):
        options = parse_args([''], apply_config=apply_config)
        for name, value in raw_options.items():
            if not hasattr(options, name):
                raise ValueError("No such option '{}'".format(name))

            # Check for very basic type errors.
            expected_type = type(getattr(options, name))
            if not isinstance(expected_type, (str, unicode)):
                if isinstance(value, (str, unicode)):
                    raise ValueError(
                        "Option '{}' should not be a string".format(name))
            setattr(options, name, value)
    else:
        options = raw_options

    return options


def fix_lines(source_lines, options, filename=''):
    """Return fixed source code."""
    # Transform everything to line feed. Then change them back to original
    # before returning fixed source code.
    original_newline = find_newline(source_lines)
    tmp_source = ''.join(normalize_line_endings(source_lines, '\n'))

    # Keep a history to break out of cycles.
    previous_hashes = set()

    if options.line_range:
        # Disable "apply_local_fixes()" for now due to issue #175.
        fixed_source = tmp_source
    else:
        pep8_options = {
            'ignore': options.ignore,
            'select': options.select,
            'max_line_length': options.max_line_length,
            'hang_closing': options.hang_closing,
        }
        sio = io.StringIO(tmp_source)
        contents = sio.readlines()
        results = _execute_pep8(pep8_options, contents)
        codes = {result['id'] for result in results
                 if result['id'] in SELECTED_GLOBAL_FIXED_METHOD_CODES}
        # Apply global fixes only once (for efficiency).
        fixed_source = apply_global_fixes(tmp_source,
                                          options,
                                          filename=filename,
                                          codes=codes)

    passes = 0
    long_line_ignore_cache = set()
    while hash(fixed_source) not in previous_hashes:
        if options.pep8_passes >= 0 and passes > options.pep8_passes:
            break
        passes += 1

        previous_hashes.add(hash(fixed_source))

        tmp_source = copy.copy(fixed_source)

        fix = FixPEP8(
            filename,
            options,
            contents=tmp_source,
            long_line_ignore_cache=long_line_ignore_cache)

        fixed_source = fix.fix()

    sio = io.StringIO(fixed_source)
    return ''.join(normalize_line_endings(sio.readlines(), original_newline))


def fix_file(filename, options=None, output=None, apply_config=False):
    if not options:
        options = parse_args([filename], apply_config=apply_config)

    original_source = readlines_from_file(filename)

    fixed_source = original_source

    if options.in_place or options.diff or output:
        encoding = detect_encoding(filename)

    if output:
        output = LineEndingWrapper(wrap_output(output, encoding=encoding))

    fixed_source = fix_lines(fixed_source, options, filename=filename)

    if options.diff:
        new = io.StringIO(fixed_source)
        new = new.readlines()
        diff = get_diff_text(original_source, new, filename)
        if output:
            output.write(diff)
            output.flush()
        elif options.jobs > 1:
            diff = diff.encode(encoding)
        return diff
    elif options.in_place:
        original = "".join(original_source).splitlines()
        fixed = fixed_source.splitlines()
        original_source_last_line = (
            original_source[-1].split("\n")[-1] if original_source else ""
        )
        fixed_source_last_line = fixed_source.split("\n")[-1]
        if original != fixed or (
            original_source_last_line != fixed_source_last_line
        ):
            with open_with_encoding(filename, 'w', encoding=encoding) as fp:
                fp.write(fixed_source)
            return fixed_source
        return None
    else:
        if output:
            output.write(fixed_source)
            output.flush()
        return fixed_source


def global_fixes():
    """Yield multiple (code, function) tuples."""
    for function in list(globals().values()):
        if inspect.isfunction(function):
            arguments = _get_parameters(function)
            if arguments[:1] != ['source']:
                continue

            code = extract_code_from_function(function)
            if code:
                yield (code, function)


def _get_parameters(function):
    # pylint: disable=deprecated-method
    if sys.version_info.major >= 3:
        # We need to match "getargspec()", which includes "self" as the first
        # value for methods.
        # https://bugs.python.org/issue17481#msg209469
        if inspect.ismethod(function):
            function = function.__func__

        return list(inspect.signature(function).parameters)
    else:
        return inspect.getargspec(function)[0]


def apply_global_fixes(source, options, where='global', filename='',
                       codes=None):
    """Run global fixes on source code.

    These are fixes that only need be done once (unlike those in
    FixPEP8, which are dependent on pycodestyle).

    """
    if codes is None:
        codes = []
    if any(code_match(code, select=options.select, ignore=options.ignore)
           for code in ['E101', 'E111']):
        source = reindent(source,
                          indent_size=options.indent_size)

    for (code, function) in global_fixes():
        if code.upper() in SELECTED_GLOBAL_FIXED_METHOD_CODES \
                and code.upper() not in codes:
            continue
        if code_match(code, select=options.select, ignore=options.ignore):
            if options.verbose:
                print('---> Applying {} fix for {}'.format(where,
                                                           code.upper()),
                      file=sys.stderr)
            source = function(source,
                              aggressive=options.aggressive)

    source = fix_2to3(source,
                      aggressive=options.aggressive,
                      select=options.select,
                      ignore=options.ignore,
                      filename=filename,
                      where=where,
                      verbose=options.verbose)

    return source


def extract_code_from_function(function):
    """Return code handled by function."""
    if not function.__name__.startswith('fix_'):
        return None

    code = re.sub('^fix_', '', function.__name__)
    if not code:
        return None

    try:
        int(code[1:])
    except ValueError:
        return None

    return code


def _get_package_version():
    packages = ["pycodestyle: {}".format(pycodestyle.__version__)]
    return ", ".join(packages)


def create_parser():
    """Return command-line parser."""
    parser = argparse.ArgumentParser(description=docstring_summary(__doc__),
                                     prog='autopep8')
    parser.add_argument('--version', action='version',
                        version='%(prog)s {} ({})'.format(
                            __version__, _get_package_version()))
    parser.add_argument('-v', '--verbose', action='count',
                        default=0,
                        help='print verbose messages; '
                             'multiple -v result in more verbose messages')
    parser.add_argument('-d', '--diff', action='store_true',
                        help='print the diff for the fixed source')
    parser.add_argument('-i', '--in-place', action='store_true',
                        help='make changes to files in place')
    parser.add_argument('--global-config', metavar='filename',
                        default=DEFAULT_CONFIG,
                        help='path to a global pep8 config file; if this '
                             'file does not exist then this is ignored '
                             '(default: {})'.format(DEFAULT_CONFIG))
    parser.add_argument('--ignore-local-config', action='store_true',
                        help="don't look for and apply local config files; "
                             'if not passed, defaults are updated with any '
                             "config files in the project's root directory")
    parser.add_argument('-r', '--recursive', action='store_true',
                        help='run recursively over directories; '
                             'must be used with --in-place or --diff')
    parser.add_argument('-j', '--jobs', type=int, metavar='n', default=1,
                        help='number of parallel jobs; '
                             'match CPU count if value is less than 1')
    parser.add_argument('-p', '--pep8-passes', metavar='n',
                        default=-1, type=int,
                        help='maximum number of additional pep8 passes '
                             '(default: infinite)')
    parser.add_argument('-a', '--aggressive', action='count', default=0,
                        help='enable non-whitespace changes; '
                             'multiple -a result in more aggressive changes')
    parser.add_argument('--experimental', action='store_true',
                        help='enable experimental fixes')
    parser.add_argument('--exclude', metavar='globs',
                        help='exclude file/directory names that match these '
                             'comma-separated globs')
    parser.add_argument('--list-fixes', action='store_true',
                        help='list codes for fixes; '
                             'used by --ignore and --select')
    parser.add_argument('--ignore', metavar='errors', default='',
                        help='do not fix these errors/warnings '
                             '(default: {})'.format(DEFAULT_IGNORE))
    parser.add_argument('--select', metavar='errors', default='',
                        help='fix only these errors/warnings (e.g. E4,W)')
    parser.add_argument('--max-line-length', metavar='n', default=79,
                        type=int,
                        help='set maximum allowed line length '
                             '(default: %(default)s)')
    parser.add_argument('--line-range', '--range', metavar='line',
                        default=None, type=int, nargs=2,
                        help='only fix errors found within this inclusive '
                             'range of line numbers (e.g. 1 99); '
                             'line numbers are indexed at 1')
    parser.add_argument('--indent-size', default=DEFAULT_INDENT_SIZE,
                        type=int, help=argparse.SUPPRESS)
    parser.add_argument('--hang-closing', action='store_true',
                        help='hang-closing option passed to pycodestyle')
    parser.add_argument('--exit-code', action='store_true',
                        help='change the exit code behavior: without this '
                             'option, 0 means success and 1 means an error '
                             'occurred; with it, exit code 2 is returned '
                             'when differences exist')
    parser.add_argument('files', nargs='*',
                        help="files to format or '-' for standard in")

    return parser


def _expand_codes(codes, ignore_codes):
    """expand to individual E/W codes"""
    ret = set()

    is_conflict = False
    if all(
            any(
                conflicting_code.startswith(code)
                for code in codes
            )
            for conflicting_code in CONFLICTING_CODES
    ):
        is_conflict = True

    is_ignore_w503 = "W503" in ignore_codes
    is_ignore_w504 = "W504" in ignore_codes

    for code in codes:
        if code == "W":
            if is_ignore_w503 and is_ignore_w504:
                ret.update({"W1", "W2", "W3", "W505", "W6"})
            elif is_ignore_w503:
                ret.update({"W1", "W2", "W3", "W504", "W505", "W6"})
            else:
                ret.update({"W1", "W2", "W3", "W503", "W505", "W6"})
        elif code in ("W5", "W50"):
            if is_ignore_w503 and is_ignore_w504:
                ret.update({"W505"})
            elif is_ignore_w503:
                ret.update({"W504", "W505"})
            else:
                ret.update({"W503", "W505"})
        elif not (code in ("W503", "W504") and is_conflict):
            ret.add(code)

    return ret
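
# Illustrative examples (editor's note; not part of the original source):
#   _expand_codes({'W'}, [])        -> {'W1', 'W2', 'W3', 'W503', 'W505', 'W6'}
#   _expand_codes({'E1', 'W6'}, []) -> {'E1', 'W6'}
# i.e. the umbrella 'W' code is expanded so that the mutually conflicting
# W503/W504 checks are not enabled at the same time.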


def parse_args(arguments, apply_config=False):
    """Parse command-line options."""
    parser = create_parser()
    args = parser.parse_args(arguments)

    if not args.files and not args.list_fixes:
        parser.exit(EXIT_CODE_ARGPARSE_ERROR, 'incorrect number of arguments')

    args.files = [decode_filename(name) for name in args.files]

    if apply_config:
        parser = read_config(args, parser)
        # Settings from pyproject.toml's [tool.autopep8] section take
        # priority when that section exists.
        try:
            parser_with_pyproject_toml = read_pyproject_toml(args, parser)
        except Exception:
            parser_with_pyproject_toml = None
        if parser_with_pyproject_toml:
            parser = parser_with_pyproject_toml
        args = parser.parse_args(arguments)
        args.files = [decode_filename(name) for name in args.files]

    if '-' in args.files:
        if len(args.files) > 1:
            parser.exit(
                EXIT_CODE_ARGPARSE_ERROR,
                'cannot mix stdin and regular files',
            )

        if args.diff:
            parser.exit(
                EXIT_CODE_ARGPARSE_ERROR,
                '--diff cannot be used with standard input',
            )

        if args.in_place:
            parser.exit(
                EXIT_CODE_ARGPARSE_ERROR,
                '--in-place cannot be used with standard input',
            )

        if args.recursive:
            parser.exit(
                EXIT_CODE_ARGPARSE_ERROR,
                '--recursive cannot be used with standard input',
            )

    if len(args.files) > 1 and not (args.in_place or args.diff):
        parser.exit(
            EXIT_CODE_ARGPARSE_ERROR,
            'autopep8 only takes one filename as argument '
            'unless the "--in-place" or "--diff" args are used',
        )

    if args.recursive and not (args.in_place or args.diff):
        parser.exit(
            EXIT_CODE_ARGPARSE_ERROR,
            '--recursive must be used with --in-place or --diff',
        )

    if args.in_place and args.diff:
        parser.exit(
            EXIT_CODE_ARGPARSE_ERROR,
            '--in-place and --diff are mutually exclusive',
        )

    if args.max_line_length <= 0:
        parser.exit(
            EXIT_CODE_ARGPARSE_ERROR,
            '--max-line-length must be greater than 0',
        )

    if args.indent_size <= 0:
        parser.exit(
            EXIT_CODE_ARGPARSE_ERROR,
            '--indent-size must be greater than 0',
        )

    if args.select:
        args.select = _expand_codes(
            _split_comma_separated(args.select),
            (_split_comma_separated(args.ignore) if args.ignore else [])
        )

    if args.ignore:
        args.ignore = _split_comma_separated(args.ignore)
        if all(
                not any(
                    conflicting_code.startswith(ignore_code)
                    for ignore_code in args.ignore
                )
                for conflicting_code in CONFLICTING_CODES
        ):
            args.ignore.update(CONFLICTING_CODES)
    elif not args.select:
        if args.aggressive:
            # Enable everything by default if aggressive.
            args.select = {'E', 'W1', 'W2', 'W3', 'W6'}
        else:
            args.ignore = _split_comma_separated(DEFAULT_IGNORE)

    if args.exclude:
        args.exclude = _split_comma_separated(args.exclude)
    else:
        args.exclude = {}

    if args.jobs < 1:
        # Do not import multiprocessing globally in case it is not supported
        # on the platform.
        import multiprocessing
        args.jobs = multiprocessing.cpu_count()

    if args.jobs > 1 and not (args.in_place or args.diff):
        parser.exit(
            EXIT_CODE_ARGPARSE_ERROR,
            'parallel jobs requires --in-place',
        )

    if args.line_range:
        if args.line_range[0] <= 0:
            parser.exit(
                EXIT_CODE_ARGPARSE_ERROR,
                '--range must be positive numbers',
            )
        if args.line_range[0] > args.line_range[1]:
            parser.exit(
                EXIT_CODE_ARGPARSE_ERROR,
                'First value of --range should be less than or equal '
                'to the second',
            )

    return args


def _get_normalize_options(config, section, option_list):
    for (k, _) in config.items(section):
        norm_opt = k.lstrip('-').replace('-', '_')
        if not option_list.get(norm_opt):
            continue
        opt_type = option_list[norm_opt]
        if opt_type is int:
            value = config.getint(section, k)
        elif opt_type is bool:
            value = config.getboolean(section, k)
        else:
            value = config.get(section, k)
        yield norm_opt, k, value


def read_config(args, parser):
    """Read both user configuration and local configuration."""
    config = SafeConfigParser()

    try:
        config.read(args.global_config)

        if not args.ignore_local_config:
            parent = tail = args.files and os.path.abspath(
                os.path.commonprefix(args.files))
            while tail:
                if config.read([os.path.join(parent, fn)
                                for fn in PROJECT_CONFIG]):
                    break
                (parent, tail) = os.path.split(parent)

        defaults = {}
        option_list = {o.dest: o.type or type(o.default)
                       for o in parser._actions}

        for section in ['pep8', 'pycodestyle', 'flake8']:
            if not config.has_section(section):
                continue
            for norm_opt, k, value in _get_normalize_options(config, section,
                                                             option_list):
                if args.verbose:
                    print("enable config: section={}, key={}, value={}".format(
                        section, k, value))
                defaults[norm_opt] = value

        parser.set_defaults(**defaults)
    except Error:
        # Ignore for now.
        pass

    return parser


def read_pyproject_toml(args, parser):
    """Read pyproject.toml and load configuration."""
    import toml

    config = None

    if os.path.exists(args.global_config):
        with open(args.global_config) as fp:
            config = toml.load(fp)

    if not args.ignore_local_config:
        parent = tail = args.files and os.path.abspath(
            os.path.commonprefix(args.files))
        while tail:
            pyproject_toml = os.path.join(parent, "pyproject.toml")
            if os.path.exists(pyproject_toml):
                with open(pyproject_toml) as fp:
                    config = toml.load(fp)
                    break
            (parent, tail) = os.path.split(parent)

    if not config:
        return None

    if config.get("tool", {}).get("autopep8") is None:
        return None

    config = config.get("tool").get("autopep8")

    defaults = {}
    option_list = {o.dest: o.type or type(o.default)
                   for o in parser._actions}

    TUPLED_OPTIONS = ("ignore", "select")
    for (k, v) in config.items():
        norm_opt = k.lstrip('-').replace('-', '_')
        if not option_list.get(norm_opt):
            continue
        if type(v) in (list, tuple) and norm_opt in TUPLED_OPTIONS:
            value = ",".join(v)
        else:
            value = v
        if args.verbose:
            print("enable pyproject.toml config: "
                  "key={}, value={}".format(k, value))
        defaults[norm_opt] = value

    if defaults:
        # Only override parser defaults when values were actually found.
        parser.set_defaults(**defaults)

    return parser


def _split_comma_separated(string):
    """Return a set of strings."""
    return {text.strip() for text in string.split(',') if text.strip()}


def decode_filename(filename):
    """Return Unicode filename."""
    if isinstance(filename, unicode):
        return filename

    return filename.decode(sys.getfilesystemencoding())


def supported_fixes():
    """Yield pep8 error codes that autopep8 fixes.

    Each item we yield is a tuple of the code followed by its
    description.

    """
    yield ('E101', docstring_summary(reindent.__doc__))

    instance = FixPEP8(filename=None, options=None, contents='')
    for attribute in dir(instance):
        code = re.match('fix_([ew][0-9][0-9][0-9])', attribute)
        if code:
            yield (
                code.group(1).upper(),
                re.sub(r'\s+', ' ',
                       docstring_summary(getattr(instance, attribute).__doc__))
            )

    for (code, function) in sorted(global_fixes()):
        yield (code.upper() + (4 - len(code)) * ' ',
               re.sub(r'\s+', ' ', docstring_summary(function.__doc__)))

    for code in sorted(CODE_TO_2TO3):
        yield (code.upper() + (4 - len(code)) * ' ',
               re.sub(r'\s+', ' ', docstring_summary(fix_2to3.__doc__)))


def docstring_summary(docstring):
    """Return summary of docstring."""
    return docstring.split('\n')[0] if docstring else ''


def line_shortening_rank(candidate, indent_word, max_line_length,
                         experimental=False):
    """Return rank of candidate.

    This is for sorting candidates.

    """
    if not candidate.strip():
        return 0

    rank = 0
    lines = candidate.rstrip().split('\n')

    offset = 0
    if (
        not lines[0].lstrip().startswith('#') and
        lines[0].rstrip()[-1] not in '([{'
    ):
        for (opening, closing) in ('()', '[]', '{}'):
            # Don't penalize empty containers that aren't split up. Things
            # like this "foo(\n    )" aren't particularly good.
            opening_loc = lines[0].find(opening)
            closing_loc = lines[0].find(closing)
            if opening_loc >= 0:
                if closing_loc < 0 or closing_loc != opening_loc + 1:
                    offset = max(offset, 1 + opening_loc)

    current_longest = max(offset + len(x.strip()) for x in lines)

    rank += 4 * max(0, current_longest - max_line_length)

    rank += len(lines)

    # Too much variation in line length is ugly.
    rank += 2 * standard_deviation(len(line) for line in lines)

    bad_starting_symbol = {
        '(': ')',
        '[': ']',
        '{': '}'}.get(lines[0][-1])

    if len(lines) > 1:
        if (
            bad_starting_symbol and
            lines[1].lstrip().startswith(bad_starting_symbol)
        ):
            rank += 20

    for lineno, current_line in enumerate(lines):
        current_line = current_line.strip()

        if current_line.startswith('#'):
            continue

        for bad_start in ['.', '%', '+', '-', '/']:
            if current_line.startswith(bad_start):
                rank += 100

            # Do not tolerate operators on their own line.
            if current_line == bad_start:
                rank += 1000

        if (
            current_line.endswith(('.', '%', '+', '-', '/')) and
            "': " in current_line
        ):
            rank += 1000

        if current_line.endswith(('(', '[', '{', '.')):
            # Avoid lonely opening. They result in longer lines.
            if len(current_line) <= len(indent_word):
                rank += 100

            # Avoid the ugliness of ", (\n".
            if (
                current_line.endswith('(') and
                current_line[:-1].rstrip().endswith(',')
            ):
                rank += 100

            # Avoid the ugliness of "something[\n" and something[index][\n.
            if (
                current_line.endswith('[') and
                len(current_line) > 1 and
                (current_line[-2].isalnum() or current_line[-2] in ']')
            ):
                rank += 300

            # Also avoid the ugliness of "foo.\nbar"
            if current_line.endswith('.'):
                rank += 100

            if has_arithmetic_operator(current_line):
                rank += 100

        # Avoid breaking at unary operators.
        if re.match(r'.*[(\[{]\s*[\-\+~]$', current_line.rstrip('\\ ')):
            rank += 1000

        if re.match(r'.*lambda\s*\*$', current_line.rstrip('\\ ')):
            rank += 1000

        if current_line.endswith(('%', '(', '[', '{')):
            rank -= 20

        # Try to break list comprehensions at the "for".
        if current_line.startswith('for '):
            rank -= 50

        if current_line.endswith('\\'):
            # If a line ends in \-newline, it may be part of a
            # multiline string. In that case, we would like to know
            # how long that line is without the \-newline. If it's
            # longer than the maximum, or has comments, then we assume
            # that the \-newline is an okay candidate and only
            # penalize it a bit.
            total_len = len(current_line)
            lineno += 1
            while lineno < len(lines):
                total_len += len(lines[lineno])

                if lines[lineno].lstrip().startswith('#'):
                    total_len = max_line_length
                    break

                if not lines[lineno].endswith('\\'):
                    break

                lineno += 1

            if total_len < max_line_length:
                rank += 10
            else:
                rank += 100 if experimental else 1

        # Prefer breaking at commas rather than colon.
        if ',' in current_line and current_line.endswith(':'):
            rank += 10

        # Avoid splitting dictionaries between key and value.
        if current_line.endswith(':'):
            rank += 100

        rank += 10 * count_unbalanced_brackets(current_line)

    return max(0, rank)


def standard_deviation(numbers):
    """Return standard deviation."""
    numbers = list(numbers)
    if not numbers:
        return 0

    mean = sum(numbers) / len(numbers)
    return (sum((n - mean) ** 2 for n in numbers) /
            len(numbers)) ** .5


def has_arithmetic_operator(line):
    """Return True if line contains any arithmetic operators."""
    for operator in pycodestyle.ARITHMETIC_OP:
        if operator in line:
            return True

    return False


def count_unbalanced_brackets(line):
    """Return number of unmatched open/close brackets."""
    count = 0
    for opening, closing in ['()', '[]', '{}']:
        count += abs(line.count(opening) - line.count(closing))

    return count
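
# Illustrative example (editor's note; not part of the original source):
#   count_unbalanced_brackets('foo(bar[0]') -> 1
# Only the '(' is unmatched; '[' and ']' cancel out.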


def split_at_offsets(line, offsets):
    """Split line at offsets.

    Return list of strings.

    """
    result = []

    previous_offset = 0
    current_offset = 0
    for current_offset in sorted(offsets):
        if current_offset < len(line) and previous_offset != current_offset:
            result.append(line[previous_offset:current_offset].strip())
        previous_offset = current_offset

    result.append(line[current_offset:])

    return result
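
# Illustrative example (editor's note; not part of the original source):
#   split_at_offsets('abcdef', [2, 4]) -> ['ab', 'cd', 'ef']
# The pieces before each offset are stripped; the final piece is kept as is.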


class LineEndingWrapper(object):

    r"""Replace line endings to work with sys.stdout.

    It seems that sys.stdout expects only '\n' as the line ending, no matter
    the platform. Otherwise, we get repeated line endings.

    """

    def __init__(self, output):
        self.__output = output

    def write(self, s):
        self.__output.write(s.replace('\r\n', '\n').replace('\r', '\n'))

    def flush(self):
        self.__output.flush()


def match_file(filename, exclude):
    """Return True if file is okay for modifying/recursing."""
    base_name = os.path.basename(filename)

    if base_name.startswith('.'):
        return False

    for pattern in exclude:
        if fnmatch.fnmatch(base_name, pattern):
            return False
        if fnmatch.fnmatch(filename, pattern):
            return False

    if not os.path.isdir(filename) and not is_python_file(filename):
        return False

    return True


def find_files(filenames, recursive, exclude):
    """Yield filenames."""
    while filenames:
        name = filenames.pop(0)
        if recursive and os.path.isdir(name):
            for root, directories, children in os.walk(name):
                filenames += [os.path.join(root, f) for f in children
                              if match_file(os.path.join(root, f),
                                            exclude)]
                directories[:] = [d for d in directories
                                  if match_file(os.path.join(root, d),
                                                exclude)]
        else:
            is_exclude_match = False
            for pattern in exclude:
                if fnmatch.fnmatch(name, pattern):
                    is_exclude_match = True
                    break
            if not is_exclude_match:
                yield name


def _fix_file(parameters):
    """Helper function for optionally running fix_file() in parallel."""
    if parameters[1].verbose:
        print('[file:{}]'.format(parameters[0]), file=sys.stderr)
    try:
        return fix_file(*parameters)
    except IOError as error:
        print(unicode(error), file=sys.stderr)
        raise error


def fix_multiple_files(filenames, options, output=None):
    """Fix list of files.

    Optionally fix files recursively.

    """
    results = []
    filenames = find_files(filenames, options.recursive, options.exclude)
    if options.jobs > 1:
        import multiprocessing
        pool = multiprocessing.Pool(options.jobs)
        rets = []
        for name in filenames:
            ret = pool.apply_async(_fix_file, ((name, options),))
            rets.append(ret)
        pool.close()
        pool.join()
        if options.diff:
            for r in rets:
                sys.stdout.write(r.get().decode())
                sys.stdout.flush()
        results.extend([x.get() for x in rets if x is not None])
    else:
        for name in filenames:
            ret = _fix_file((name, options, output))
            if ret is None:
                continue
            if options.diff:
                if ret != '':
                    results.append(ret)
            elif options.in_place:
                results.append(ret)
            else:
                original_source = readlines_from_file(name)
                if "".join(original_source).splitlines() != ret.splitlines():
                    results.append(ret)
    return results


def is_python_file(filename):
    """Return True if filename is Python file."""
    if filename.endswith('.py'):
        return True

    try:
        with open_with_encoding(
                filename,
                limit_byte_check=MAX_PYTHON_FILE_DETECTION_BYTES) as f:
            text = f.read(MAX_PYTHON_FILE_DETECTION_BYTES)
            if not text:
                return False
            first_line = text.splitlines()[0]
    except (IOError, IndexError):
        return False

    if not PYTHON_SHEBANG_REGEX.match(first_line):
        return False

    return True


def is_probably_part_of_multiline(line):
    """Return True if line is likely part of a multiline string.

    When multiline strings are involved, pep8 reports the error as being
    at the start of the multiline string, which doesn't work for us.

    """
    return (
        '"""' in line or
        "'''" in line or
        line.rstrip().endswith('\\')
    )


def wrap_output(output, encoding):
    """Return output with specified encoding."""
    return codecs.getwriter(encoding)(output.buffer
                                      if hasattr(output, 'buffer')
                                      else output)


def get_encoding():
    """Return preferred encoding."""
    return locale.getpreferredencoding() or sys.getdefaultencoding()


def main(argv=None, apply_config=True):
    """Command-line entry."""
    if argv is None:
        argv = sys.argv

    try:
        # Exit on broken pipe.
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    except AttributeError:  # pragma: no cover
        # SIGPIPE is not available on Windows.
        pass

    try:
        args = parse_args(argv[1:], apply_config=apply_config)

        if args.list_fixes:
            for code, description in sorted(supported_fixes()):
                print('{code} - {description}'.format(
                    code=code, description=description))
            return EXIT_CODE_OK

        if args.files == ['-']:
            assert not args.in_place

            encoding = sys.stdin.encoding or get_encoding()
            read_stdin = sys.stdin.read()
            fixed_stdin = fix_code(read_stdin, args, encoding=encoding)

            # LineEndingWrapper is unnecessary here due to the symmetry
            # between standard in and standard out.
            wrap_output(sys.stdout, encoding=encoding).write(fixed_stdin)

            if hash(read_stdin) != hash(fixed_stdin):
                if args.exit_code:
                    return EXIT_CODE_EXISTS_DIFF
        else:
            if args.in_place or args.diff:
                args.files = list(set(args.files))
            else:
                assert len(args.files) == 1
                assert not args.recursive

            results = fix_multiple_files(args.files, args, sys.stdout)
            if args.diff:
                ret = any([len(ret) != 0 for ret in results])
            else:
                # with in-place option
                ret = any([ret is not None for ret in results])
            if args.exit_code and ret:
                return EXIT_CODE_EXISTS_DIFF
    except IOError:
        return EXIT_CODE_ERROR
    except KeyboardInterrupt:
        return EXIT_CODE_ERROR  # pragma: no cover


class CachedTokenizer(object):

    """A one-element cache around tokenize.generate_tokens().

    Original code written by Ned Batchelder, in coverage.py.

    """

    def __init__(self):
        self.last_text = None
        self.last_tokens = None

    def generate_tokens(self, text):
        """A stand-in for tokenize.generate_tokens()."""
        if text != self.last_text:
            string_io = io.StringIO(text)
            self.last_tokens = list(
                tokenize.generate_tokens(string_io.readline)
            )
            self.last_text = text
        return self.last_tokens


_cached_tokenizer = CachedTokenizer()
generate_tokens = _cached_tokenizer.generate_tokens


if __name__ == '__main__':
    sys.exit(main())