Skip to content

Commit 4ce55a2

Browse files
authored
[3.10] bpo-45408: Don't override previous tokenizer errors in the second parser pass (GH-28812). (GH-28813)
(cherry picked from commit 0219017) Co-authored-by: Pablo Galindo Salgado <[email protected]>
1 parent eabca6e commit 4ce55a2

File tree

4 files changed

+15
-2
lines changed

4 files changed

+15
-2
lines changed

Lib/test/test_ast.py

Lines changed: 8 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -1044,6 +1044,14 @@ def test_literal_eval_malformed_lineno(self):
10441044
with self.assertRaisesRegex(ValueError, msg):
10451045
ast.literal_eval(node)
10461046

1047+
def test_literal_eval_syntax_errors(self):
1048+
msg = "unexpected character after line continuation character"
1049+
with self.assertRaisesRegex(SyntaxError, msg):
1050+
ast.literal_eval(r'''
1051+
\
1052+
(\
1053+
\ ''')
1054+
10471055
def test_bad_integer(self):
10481056
# issue13436: Bad error message with invalid numeric values
10491057
body = [ast.ImportFrom(module='time',

Lib/test/test_exceptions.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -223,7 +223,7 @@ def testSyntaxErrorOffset(self):
223223
check('x = "a', 1, 5)
224224
check('lambda x: x = 2', 1, 1)
225225
check('f{a + b + c}', 1, 2)
226-
check('[file for str(file) in []\n])', 2, 2)
226+
check('[file for str(file) in []\n])', 1, 11)
227227
check('a = « hello » « world »', 1, 5)
228228
check('[\nfile\nfor str(file)\nin\n[]\n]', 3, 5)
229229
check('[file for\n str(file) in []]', 2, 2)
Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,2 @@
1+
Fix a crash in the parser when reporting tokenizer errors that occur at the
2+
same time unclosed parentheses are detected. Patch by Pablo Galindo.

Parser/pegen.c

Lines changed: 4 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1321,13 +1321,16 @@ _PyPegen_run_parser(Parser *p)
13211321
{
13221322
void *res = _PyPegen_parse(p);
13231323
if (res == NULL) {
1324+
if (PyErr_Occurred() && !PyErr_ExceptionMatches(PyExc_SyntaxError)) {
1325+
return NULL;
1326+
}
13241327
Token *last_token = p->tokens[p->fill - 1];
13251328
reset_parser_state(p);
13261329
_PyPegen_parse(p);
13271330
if (PyErr_Occurred()) {
13281331
// Prioritize tokenizer errors to custom syntax errors raised
13291332
// on the second phase only if the errors come from the parser.
1330-
if (p->tok->done != E_ERROR && PyErr_ExceptionMatches(PyExc_SyntaxError)) {
1333+
if (p->tok->done == E_DONE && PyErr_ExceptionMatches(PyExc_SyntaxError)) {
13311334
_PyPegen_check_tokenizer_errors(p);
13321335
}
13331336
return NULL;

0 commit comments

Comments (0)