statement_splitter.py

#
# Copyright (C) 2009-2020 the sqlparse authors and contributors
# <see AUTHORS file>
#
# This module is part of python-sqlparse and is released under
# the BSD License: https://opensource.org/licenses/BSD-3-Clause

from sqlparse import sql, tokens as T


class StatementSplitter:
    """Filter that splits a token stream into individual statements."""

    def __init__(self):
        self._reset()

    def _reset(self):
        """Set the filter attributes to their default values."""
        self._in_declare = False
        self._is_create = False
        self._begin_depth = 0

        self.consume_ws = False
        self.tokens = []
        self.level = 0

    def _change_splitlevel(self, ttype, value):
        """Get the new split level (increase, decrease or remain equal)."""

        # Parentheses increase/decrease the level by one.
        if ttype is T.Punctuation and value == '(':
            return 1
        elif ttype is T.Punctuation and value == ')':
            return -1
        elif ttype not in T.Keyword:  # if it's a normal token, return early
            return 0

        # Everything after here is ttype = T.Keyword.
        # Also note: once we have entered an IF statement we are done and
        # basically just returning.
        unified = value.upper()

        # Three keywords begin with CREATE, but only one of them is DDL.
        # A DDL CREATE can contain more words, such as "OR REPLACE".
        if ttype is T.Keyword.DDL and unified.startswith('CREATE'):
            self._is_create = True
            return 0

        # There can be a nested DECLARE inside a BEGIN ...
        if unified == 'DECLARE' and self._is_create and self._begin_depth == 0:
            self._in_declare = True
            return 1

        if unified == 'BEGIN':
            self._begin_depth += 1
            if self._is_create:
                # FIXME(andi): This makes no sense.
                return 1
            return 0

        # Should this respect a preceding BEGIN?
        # In CASE ... WHEN ... END this results in a split level -1.
        # Would having multiple CASE WHEN END and an assignment operator
        # cause the statement to cut off prematurely?
        if unified == 'END':
            self._begin_depth = max(0, self._begin_depth - 1)
            return -1

        if (unified in ('IF', 'FOR', 'WHILE', 'CASE')
                and self._is_create and self._begin_depth > 0):
            return 1

        if unified in ('END IF', 'END FOR', 'END WHILE'):
            return -1

        # Default: the level stays the same.
        return 0
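
    # Example (illustrative sketch, not from the original source): assuming
    # the lexer emits CREATE as T.Keyword.DDL and BEGIN/END as T.Keyword, a
    # block such as
    #
    #     CREATE PROCEDURE p AS
    #     BEGIN
    #         SELECT 1;
    #         SELECT 2;
    #     END;
    #
    # keeps the split level at 0 through CREATE (which only sets _is_create),
    # raises it to 1 at BEGIN and lowers it back to 0 at END.  While the level
    # is above 0, process() below ignores the semicolons inside the body, so
    # only the final ';' after END terminates the statement.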
    def process(self, stream):
        """Process the stream."""
        EOS_TTYPE = T.Whitespace, T.Comment.Single

        # Run over all stream tokens.
        for ttype, value in stream:
            # Yield the pending statement if we already finished one and the
            # current token is not trailing whitespace.  A newline token counts
            # as non-whitespace here; in this context "whitespace" ignores
            # newlines.  Why don't multi-line comments also count?
            if self.consume_ws and ttype not in EOS_TTYPE:
                yield sql.Statement(self.tokens)

                # Reset the filter and prepare to process the next statement.
                self._reset()

            # Change the current split level (increase, decrease or remain
            # equal).
            self.level += self._change_splitlevel(ttype, value)

            # Append the token to the current statement.
            self.tokens.append(sql.Token(ttype, value))

            # Check if we reached the end of a statement.
            if self.level <= 0 and ttype is T.Punctuation and value == ';':
                self.consume_ws = True

        # Yield the pending statement (if any).
        if self.tokens and not all(t.is_whitespace for t in self.tokens):
            yield sql.Statement(self.tokens)
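

# Usage sketch (illustrative, not part of the original module).  Inside the
# library the splitter is driven by the engine's FilterStack, but it can also
# be fed a raw (ttype, value) stream directly, e.g. the one produced by
# sqlparse.lexer.tokenize (module path assumed to be sqlparse.engine):
#
#     from sqlparse import lexer
#     from sqlparse.engine.statement_splitter import StatementSplitter
#
#     raw = "SELECT 1; SELECT 2;"
#     splitter = StatementSplitter()
#     for stmt in splitter.process(lexer.tokenize(raw)):
#         print(str(stmt).strip())
#
# For everyday use, the public helper sqlparse.split(raw) wraps this machinery
# and returns the statements as plain strings.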