Commit 51d0cdc

bpo-39219: Fix SyntaxError attributes in the tokenizer.

* Always set the text attribute.
* Correct the offset attribute for non-ascii sources.
1 parent ec007cb commit 51d0cdc
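The user-visible effect of the fix can be checked from Python directly. A minimal sketch (not part of the commit), using one of the byte-string sources added to the tests below; after this change the SyntaxError carries the offending line in its text attribute and a character-based offset even though the source was passed as non-ascii bytes:

    try:
        compile(b'\xce\xb1 = 0xI', '<fragment>', 'exec')   # UTF-8 bytes for 'α = 0xI'
    except SyntaxError as exc:
        # With the fix, text holds the decoded source line and offset counts
        # characters; the expected values come from the updated test case.
        print(exc.lineno, exc.offset)   # 1 6
        print(exc.text)                 # α = 0xI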

3 files changed, +47 -5 lines changed


Lib/test/test_exceptions.py

Lines changed: 13 additions & 1 deletion
@@ -179,17 +179,25 @@ def ckmsg(src, msg, exception=SyntaxError):
         ckmsg(s, "inconsistent use of tabs and spaces in indentation", TabError)
 
     def testSyntaxErrorOffset(self):
-        def check(src, lineno, offset):
+        def check(src, lineno, offset, encoding='utf-8'):
             with self.assertRaises(SyntaxError) as cm:
                 compile(src, '<fragment>', 'exec')
             self.assertEqual(cm.exception.lineno, lineno)
             self.assertEqual(cm.exception.offset, offset)
+            if cm.exception.text is not None:
+                if not isinstance(src, str):
+                    src = src.decode(encoding, 'replace')
+                line = src.split('\n')[lineno-1]
+                self.assertEqual(cm.exception.text.rstrip('\n'), line)
 
         check('def fact(x):\n\treturn x!\n', 2, 10)
         check('1 +\n', 1, 4)
         check('def spam():\n print(1)\n print(2)', 3, 10)
         check('Python = "Python" +', 1, 20)
         check('Python = "\u1e54\xfd\u0163\u0125\xf2\xf1" +', 1, 20)
+        check(b'# -*- coding: cp1251 -*-\nPython = "\xcf\xb3\xf2\xee\xed" +',
+              2, 19, encoding='cp1251')
+        check(b'Python = "\xcf\xb3\xf2\xee\xed" +', 1, 18)
         check('x = "a', 1, 7)
         check('lambda x: x = 2', 1, 1)
 
@@ -205,6 +213,10 @@ def check(src, lineno, offset):
         check('0010 + 2', 1, 4)
         check('x = 32e-+4', 1, 8)
         check('x = 0o9', 1, 6)
+        check('\u03b1 = 0xI', 1, 6)
+        check(b'\xce\xb1 = 0xI', 1, 6)
+        check(b'# -*- coding: iso8859-7 -*-\n\xe1 = 0xI', 2, 6,
+              encoding='iso8859-7')
 
         # Errors thrown by symtable.c
         check('x = [(yield i) for i in range(3)]', 1, 5)
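Taken out of test_exceptions.py, the updated helper amounts to the following self-contained sketch (the class and test names are illustrative; only the check() logic and the two byte-string cases come from the diff above):

    import unittest

    class SyntaxErrorOffsetSketch(unittest.TestCase):

        def check(self, src, lineno, offset, encoding='utf-8'):
            with self.assertRaises(SyntaxError) as cm:
                compile(src, '<fragment>', 'exec')
            self.assertEqual(cm.exception.lineno, lineno)
            self.assertEqual(cm.exception.offset, offset)
            if cm.exception.text is not None:
                # bytes sources are decoded with the declared encoding so the
                # expected line can be compared against exception.text
                if not isinstance(src, str):
                    src = src.decode(encoding, 'replace')
                line = src.split('\n')[lineno - 1]
                self.assertEqual(cm.exception.text.rstrip('\n'), line)

        def test_non_ascii_sources(self):
            self.check(b'# -*- coding: cp1251 -*-\nPython = "\xcf\xb3\xf2\xee\xed" +',
                       2, 19, encoding='cp1251')
            self.check(b'\xce\xb1 = 0xI', 1, 6)

    if __name__ == '__main__':
        unittest.main()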
NEWS entry (new file)

Lines changed: 2 additions & 0 deletions

@@ -0,0 +1,2 @@
+Syntax errors raised in the tokenizer now always set correct "text" and
+"offset" attributes.

Parser/tokenizer.c

Lines changed: 32 additions & 4 deletions
@@ -1,6 +1,7 @@
 
 /* Tokenizer implementation */
 
+#define PY_SSIZE_T_CLEAN
 #include "Python.h"
 
 #include <ctype.h>
@@ -1032,17 +1033,44 @@ tok_backup(struct tok_state *tok, int c)
 static int
 syntaxerror(struct tok_state *tok, const char *format, ...)
 {
+    PyObject *errmsg, *errtext, *args;
     va_list vargs;
 #ifdef HAVE_STDARG_PROTOTYPES
     va_start(vargs, format);
 #else
     va_start(vargs);
 #endif
-    PyErr_FormatV(PyExc_SyntaxError, format, vargs);
+    errmsg = PyUnicode_FromFormatV(format, vargs);
     va_end(vargs);
-    PyErr_SyntaxLocationObject(tok->filename,
-                               tok->lineno,
-                               (int)(tok->cur - tok->line_start));
+    if (!errmsg) {
+        goto error;
+    }
+
+    errtext = PyUnicode_DecodeUTF8(tok->line_start, tok->cur - tok->line_start,
+                                   "replace");
+    if (!errtext) {
+        goto error;
+    }
+    int offset = (int)PyUnicode_GET_LENGTH(errtext);
+    Py_ssize_t line_len = strcspn(tok->line_start, "\n");
+    if (line_len != tok->cur - tok->line_start) {
+        Py_DECREF(errtext);
+        errtext = PyUnicode_DecodeUTF8(tok->line_start, line_len,
+                                       "replace");
+    }
+    if (!errtext) {
+        goto error;
+    }
+
+    args = Py_BuildValue("(O(OiiN))", errmsg,
+                         tok->filename, tok->lineno, offset, errtext);
+    if (args) {
+        PyErr_SetObject(PyExc_SyntaxError, args);
+        Py_DECREF(args);
+    }
+
+error:
+    Py_XDECREF(errmsg);
     tok->done = E_ERROR;
     return ERRORTOKEN;
 }
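For reference, the Py_BuildValue format "(O(OiiN))" assembles the conventional two-element SyntaxError args tuple, a message plus a (filename, lineno, offset, text) detail tuple, and PyErr_SetObject lets the exception populate its attributes from it. A rough Python-level analogue (the message and values below are illustrative, not what the tokenizer emits for any particular source):

    # args = (msg, (filename, lineno, offset, text)), mirroring the C call above
    exc = SyntaxError("invalid syntax", ('<fragment>', 1, 6, '\u03b1 = 0xI'))
    print(exc.msg)                                 # invalid syntax
    print(exc.filename, exc.lineno, exc.offset)    # <fragment> 1 6
    print(exc.text)                                # α = 0xI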
