Sartika Aritonang / news · Commits · c4e07ac3

Commit c4e07ac3, authored 4 years ago by Sartika Aritonang.
Parent: fc587302. Branches: master … patch-6.
Showing 1 changed file with 125 additions and 0 deletions.

stbi/Lib/site-packages/Sqlparse/filters/others.py (new file, 0 → 100644, +125 -0)
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2018 the sqlparse authors and contributors
# <see AUTHORS file>
#
# This module is part of python-sqlparse and is released under
# the BSD License: https://opensource.org/licenses/BSD-3-Clause
from sqlparse import sql, tokens as T
from sqlparse.utils import split_unquoted_newlines

class StripCommentsFilter(object):

    @staticmethod
    def _process(tlist):
        def get_next_comment():
            # TODO(andi) Comment types should be unified, see related issue38
            return tlist.token_next_by(i=sql.Comment, t=T.Comment)

        tidx, token = get_next_comment()
        while token:
            pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
            nidx, next_ = tlist.token_next(tidx, skip_ws=False)
            # Replace by whitespace if prev and next exist and if they're not
            # whitespaces. This doesn't apply if prev or next is a parenthesis.
            if (prev_ is None or next_ is None
                    or prev_.is_whitespace or prev_.match(T.Punctuation, '(')
                    or next_.is_whitespace or next_.match(T.Punctuation, ')')):
                # Insert a whitespace to ensure the following SQL produces
                # a valid SQL (see #425). For example:
                #
                # Before: select a--comment\nfrom foo
                # After: select a from foo
                if prev_ is not None and next_ is None:
                    tlist.tokens.insert(tidx, sql.Token(T.Whitespace, ' '))
                tlist.tokens.remove(token)
            else:
                tlist.tokens[tidx] = sql.Token(T.Whitespace, ' ')

            tidx, token = get_next_comment()

    def process(self, stmt):
        [self.process(sgroup) for sgroup in stmt.get_sublists()]
        StripCommentsFilter._process(stmt)
        return stmt
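
# Usage sketch (not part of the original module): StripCommentsFilter is the
# filter that sqlparse.format() applies when called with strip_comments=True.
# The helper name below is illustrative only.
def _demo_strip_comments():
    import sqlparse
    raw = 'select a --comment\nfrom foo'
    # The comment token is removed while keeping the SQL valid, roughly:
    # 'select a from foo'
    return sqlparse.format(raw, strip_comments=True)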

class StripWhitespaceFilter(object):
    def _stripws(self, tlist):
        func_name = '_stripws_{cls}'.format(cls=type(tlist).__name__)
        func = getattr(self, func_name.lower(), self._stripws_default)
        func(tlist)

    @staticmethod
    def _stripws_default(tlist):
        last_was_ws = False
        is_first_char = True
        for token in tlist.tokens:
            if token.is_whitespace:
                token.value = '' if last_was_ws or is_first_char else ' '
            last_was_ws = token.is_whitespace
            is_first_char = False

    def _stripws_identifierlist(self, tlist):
        # Removes newlines before commas, see issue140
        last_nl = None
        for token in list(tlist.tokens):
            if last_nl and token.ttype is T.Punctuation and token.value == ',':
                tlist.tokens.remove(last_nl)
            last_nl = token if token.is_whitespace else None

            # next_ = tlist.token_next(token, skip_ws=False)
            # if (next_ and not next_.is_whitespace and
            #         token.ttype is T.Punctuation and token.value == ','):
            #     tlist.insert_after(token, sql.Token(T.Whitespace, ' '))
        return self._stripws_default(tlist)

    def _stripws_parenthesis(self, tlist):
        while tlist.tokens[1].is_whitespace:
            tlist.tokens.pop(1)
        while tlist.tokens[-2].is_whitespace:
            tlist.tokens.pop(-2)
        self._stripws_default(tlist)

    def process(self, stmt, depth=0):
        [self.process(sgroup, depth + 1) for sgroup in stmt.get_sublists()]
        self._stripws(stmt)
        if depth == 0 and stmt.tokens and stmt.tokens[-1].is_whitespace:
            stmt.tokens.pop(-1)
        return stmt
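
# Usage sketch (not part of the original module): StripWhitespaceFilter backs
# sqlparse.format(..., strip_whitespace=True). The helper name is illustrative.
def _demo_strip_whitespace():
    import sqlparse
    raw = 'select\n   a,   b\nfrom   foo'
    # Runs of whitespace collapse to single spaces and newlines before commas
    # are removed by _stripws_identifierlist, roughly:
    # 'select a, b from foo'
    return sqlparse.format(raw, strip_whitespace=True)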

class SpacesAroundOperatorsFilter(object):
    @staticmethod
    def _process(tlist):
        ttypes = (T.Operator, T.Comparison)
        tidx, token = tlist.token_next_by(t=ttypes)
        while token:
            nidx, next_ = tlist.token_next(tidx, skip_ws=False)
            if next_ and next_.ttype != T.Whitespace:
                tlist.insert_after(tidx, sql.Token(T.Whitespace, ' '))

            pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
            if prev_ and prev_.ttype != T.Whitespace:
                tlist.insert_before(tidx, sql.Token(T.Whitespace, ' '))
                tidx += 1  # has to shift since token inserted before it

            # assert tlist.token_index(token) == tidx
            tidx, token = tlist.token_next_by(t=ttypes, idx=tidx)

    def process(self, stmt):
        [self.process(sgroup) for sgroup in stmt.get_sublists()]
        SpacesAroundOperatorsFilter._process(stmt)
        return stmt
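
# Usage sketch (not part of the original module): SpacesAroundOperatorsFilter
# backs sqlparse.format(..., use_space_around_operators=True). The helper name
# is illustrative.
def _demo_space_around_operators():
    import sqlparse
    raw = 'select a+b from foo where c>1'
    # Missing spaces are inserted around operators and comparisons, roughly:
    # 'select a + b from foo where c > 1'
    return sqlparse.format(raw, use_space_around_operators=True)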

# ---------------------------
# postprocess

class SerializerUnicode(object):
    @staticmethod
    def process(stmt):
        lines = split_unquoted_newlines(stmt)
        return '\n'.join(line.rstrip() for line in lines)
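
# Usage sketch (not part of the original module): SerializerUnicode is added
# as a postprocess step by the formatter's filter stack, but it can also be
# applied directly to a parsed statement. The helper name is illustrative.
def _demo_serialize():
    import sqlparse
    stmt = sqlparse.parse('select a   \nfrom foo  ')[0]
    # Trailing whitespace is stripped from each unquoted line, roughly:
    # 'select a\nfrom foo'
    return SerializerUnicode.process(stmt)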