Coverage for tests/test_phystokens.py: 100.000%
109 statements
coverage.py v7.12.1a0.dev1, created at 2025-11-30 17:57 +0000

# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt

"""Tests for coverage.py's improved tokenizer."""

from __future__ import annotations

import os.path
import re
import sys
import textwrap
import warnings

import pytest

from coverage import env
from coverage.phystokens import source_token_lines, source_encoding
from coverage.python import get_python_source

from tests.coveragetest import CoverageTest, TESTS_DIR


# A simple program and its token stream.
SIMPLE = """\
# yay!
def foo():
    say('two = %d' % 2)
"""

SIMPLE_TOKENS = [
    [("com", "# yay!")],
    [("key", "def"), ("ws", " "), ("nam", "foo"), ("op", "("), ("op", ")"), ("op", ":")],
    [
        ("ws", "    "),
        ("nam", "say"),
        ("op", "("),
        ("str", "'two = %d'"),
        ("ws", " "),
        ("op", "%"),
        ("ws", " "),
        ("num", "2"),
        ("op", ")"),
    ],
]
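
# The short class codes in these expected token lists appear to be abbreviated
# token categories from source_token_lines: "com" comment, "key" keyword,
# "ws" whitespace, "nam" name, "op" operator, "str" string, "num" number,
# plus "xx" as a catch-all and "fst" for f-string parts (see further below).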

# Mixed-whitespace program, and its token stream.
MIXED_WS = """\
def hello():
    a="Hello world!"
\tb="indented"
"""

MIXED_WS_TOKENS = [
    [("key", "def"), ("ws", " "), ("nam", "hello"), ("op", "("), ("op", ")"), ("op", ":")],
    [("ws", "    "), ("nam", "a"), ("op", "="), ("str", '"Hello world!"')],
    [("ws", "        "), ("nam", "b"), ("op", "="), ("str", '"indented"')],
]
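
# The tab-indented line in MIXED_WS is expected back as spaces: source_token_lines
# appears to expand the leading tab to spaces rather than preserving the tab
# character itself.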

# https://github.com/coveragepy/coveragepy/issues/822
BUG_822 = """\
print( "Message 1" )
array = [ 1,2,3,4, # 4 numbers \\
    5,6,7 ] # 3 numbers
print( "Message 2" )
"""

class PhysTokensTest(CoverageTest):
    """Tests for coverage.py's improved tokenizer."""

    run_in_temp_dir = False

    def check_tokenization(self, source: str) -> None:
        """Tokenize `source`, then put it back together; it should be the same."""
        tokenized = ""
        for line in source_token_lines(source):
            text = "".join(t for _, t in line)
            tokenized += text + "\n"
        # source_token_lines doesn't preserve trailing spaces, so trim all that
        # before comparing.
        source = source.replace("\r\n", "\n")
        source = re.sub(r"(?m)[ \t]+$", "", source)
        tokenized = re.sub(r"(?m)[ \t]+$", "", tokenized)
        assert source == tokenized
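
        # For reference, a minimal round trip by hand (a sketch assuming the
        # same source_token_lines API used above) would be:
        #
        #     for line in source_token_lines("a = 1\n"):
        #         print("".join(text for _, text in line))
        #
        # which prints the source back one line at a time.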

    def check_file_tokenization(self, fname: str) -> None:
        """Use the contents of `fname` for `check_tokenization`."""
        self.check_tokenization(get_python_source(fname))

    def test_simple(self) -> None:
        assert list(source_token_lines(SIMPLE)) == SIMPLE_TOKENS
        self.check_tokenization(SIMPLE)

    def test_missing_final_newline(self) -> None:
        # We can tokenize source that is missing the final newline.
        assert list(source_token_lines(SIMPLE.rstrip())) == SIMPLE_TOKENS

    def test_tab_indentation(self) -> None:
        # Mixed tabs and spaces...
        assert list(source_token_lines(MIXED_WS)) == MIXED_WS_TOKENS

    def test_bug_822(self) -> None:
        self.check_tokenization(BUG_822)

    def test_tokenize_real_file(self) -> None:
        # Check the tokenization of a real file (large, btw).
        real_file = os.path.join(TESTS_DIR, "test_coverage.py")
        self.check_file_tokenization(real_file)

    def test_1828(self) -> None:
        # https://github.com/coveragepy/coveragepy/pull/1828
        tokens = list(
            source_token_lines(
                textwrap.dedent("""
                    x = \
                        1
                    a = ["aaa",\\
                        "bbb \\
                        ccc"]
                    """)
            )
        )
        assert tokens == [
            [],
            [("nam", "x"), ("ws", " "), ("op", "="), ("ws", " "), ("num", "1")],
            [
                ("nam", "a"),
                ("ws", " "),
                ("op", "="),
                ("ws", " "),
                ("op", "["),
                ("str", '"aaa"'),
                ("op", ","),
                ("xx", "\\"),
            ],
            [("ws", " "), ("str", '"bbb \\')],
            [("str", ' ccc"'), ("op", "]")],
        ]

    @pytest.mark.parametrize(
        "fname",
        [
            "stress_phystoken.tok",
            "stress_phystoken_dos.tok",
        ],
    )
    def test_stress(self, fname: str) -> None:
        # Check the tokenization of the stress-test files.
        # And check that those files haven't been incorrectly "fixed".
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", message=r".*invalid escape sequence")

            stress = os.path.join(TESTS_DIR, fname)
            self.check_file_tokenization(stress)
        with open(stress, encoding="utf-8") as fstress:
            assert re.search(r"(?m) $", fstress.read()), f"{stress} needs a trailing space."

    def test_fstring_middle(self) -> None:
        tokens = list(
            source_token_lines(
                textwrap.dedent("""\
                    f'Look: {x} {{x}}!'
                    """)
            )
        )
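        # env.PYBEHAVIOR.fstring_syntax presumably tracks the Python 3.12 (PEP 701)
        # tokenizer, which emits the pieces of an f-string as separate tokens
        # instead of one single string token.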
        if env.PYBEHAVIOR.fstring_syntax:
            assert tokens == [
                [
                    ("fst", "f'"),
                    ("fst", "Look: "),
                    ("op", "{"),
                    ("nam", "x"),
                    ("op", "}"),
                    ("fst", " {{"),
                    ("fst", "x}}"),
                    ("fst", "!"),
                    ("fst", "'"),
                ],
            ]
        else:
            assert tokens == [[("str", "f'Look: {x} {{x}}!'")]]


class SoftKeywordTest(CoverageTest):
    """Tests the tokenizer handling soft keywords."""

    run_in_temp_dir = False

    def test_soft_keywords_match_case(self) -> None:
        source = textwrap.dedent("""\
            match re.match(something):
                case ["what"]:
                    match = case("hello")
                case [_]:
                    match("hello")
            match another.thing:
                case 1:
                    pass

            class case(): pass
            def match():
                global case
            """)
        tokens = list(source_token_lines(source))
        assert tokens[0][0] == ("key", "match")
        assert tokens[0][4] == ("nam", "match")
        assert tokens[1][1] == ("key", "case")
        assert tokens[2][1] == ("nam", "match")
        assert tokens[2][5] == ("nam", "case")
        assert tokens[3][1] == ("key", "case")
        assert tokens[4][1] == ("nam", "match")
        assert tokens[5][1] == ("key", "match")
        assert tokens[6][1] == ("key", "case")
        assert tokens[9][2] == ("nam", "case")
        assert tokens[10][2] == ("nam", "match")
        assert tokens[11][3] == ("nam", "case")

    @pytest.mark.skipif(sys.version_info < (3, 12), reason="type isn't a soft keyword until 3.12")
    def test_soft_keyword_type(self) -> None:
        source = textwrap.dedent("""\
            type Point = tuple[float, float]
            type(int)
            """)
        tokens = list(source_token_lines(source))
        assert tokens[0][0] == ("key", "type")
        assert tokens[1][0] == ("nam", "type")


# The default source file encoding.
DEF_ENCODING = "utf-8"


ENCODING_DECLARATION_SOURCES = [
    # Various forms from http://www.python.org/dev/peps/pep-0263/
    (1, b"# coding=cp850\n\n", "cp850"),
    (1, b"# coding=latin-1\n", "iso-8859-1"),
    (1, b"# coding=iso-latin-1\n", "iso-8859-1"),
    (1, b"#!/usr/bin/python\n# -*- coding: cp850 -*-\n", "cp850"),
    (1, b"#!/usr/bin/python\n# vim: set fileencoding=cp850:\n", "cp850"),
    (1, b"# This Python file uses this encoding: cp850\n", "cp850"),
    (1, b"# This file uses a different encoding:\n# coding: cp850\n", "cp850"),
    (1, b"\n# coding=cp850\n\n", "cp850"),
    (2, b"# -*- coding:cp850 -*-\n# vim: fileencoding=cp850\n", "cp850"),
]
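
# Each entry pairs a small integer (unused by the test below, which unpacks it
# as "_"), the raw source bytes, and the encoding that source_encoding() is
# expected to report.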


class SourceEncodingTest(CoverageTest):
    """Tests of source_encoding() for detecting encodings."""

    run_in_temp_dir = False

    def test_detect_source_encoding(self) -> None:
        for _, source, expected in ENCODING_DECLARATION_SOURCES:
            assert source_encoding(source) == expected, f"Wrong encoding in {source!r}"

    def test_detect_source_encoding_not_in_comment(self) -> None:
        # Should not detect anything here
        source = b"def parse(src, encoding=None):\n pass"
        assert source_encoding(source) == DEF_ENCODING

    def test_dont_detect_source_encoding_on_third_line(self) -> None:
        # A coding declaration doesn't count on the third line.
        source = b"\n\n# coding=cp850\n\n"
        assert source_encoding(source) == DEF_ENCODING

    def test_detect_source_encoding_of_empty_file(self) -> None:
        # An important edge case.
        assert source_encoding(b"") == DEF_ENCODING
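
    # b"\xef\xbb\xbf" is the UTF-8 byte order mark; Python names the BOM-aware
    # codec "utf-8-sig", which appears to be why the BOM tests below expect
    # that encoding.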
    def test_bom(self) -> None:
        # A BOM means utf-8.
        source = b"\xef\xbb\xbftext = 'hello'\n"
        assert source_encoding(source) == "utf-8-sig"

    def test_bom_with_encoding(self) -> None:
        source = b"\xef\xbb\xbf# coding: utf-8\ntext = 'hello'\n"
        assert source_encoding(source) == "utf-8-sig"

    def test_bom_is_wrong(self) -> None:
        # A BOM with an explicit non-utf8 encoding is an error.
        source = b"\xef\xbb\xbf# coding: cp850\n"
        with pytest.raises(SyntaxError, match="encoding problem: utf-8"):
            source_encoding(source)

    def test_unknown_encoding(self) -> None:
        source = b"# coding: klingon\n"
        with pytest.raises(SyntaxError, match="unknown encoding: klingon"):
            source_encoding(source)