diff --git a/src/core/IronPython/Compiler/Parser.cs b/src/core/IronPython/Compiler/Parser.cs
index f76f0e161..c1ed3e4eb 100644
--- a/src/core/IronPython/Compiler/Parser.cs
+++ b/src/core/IronPython/Compiler/Parser.cs
@@ -45,7 +45,7 @@ public class Parser : IDisposable { // TODO: remove IDisposable
private SourceUnit _sourceUnit;
///
- /// Language features initialized on parser construction and possibly updated during parsing.
+ /// Language features initialized on parser construction and possibly updated during parsing.
/// The code can set the language features (e.g. "from __future__ import division").
///
private ModuleOptions _languageFeatures;
@@ -376,7 +376,7 @@ internal void ReportSyntaxError(int start, int end, string message, int errorCod
Severity.FatalError);
}
- #endregion
+ #endregion
#region LL(1) Parsing
@@ -403,7 +403,7 @@ private string ReadName() {
}
//stmt: simple_stmt | compound_stmt
- //compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt
+ //compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt
private Statement ParseStmt() {
switch (PeekToken().Kind) {
case TokenKind.KeywordIf:
@@ -465,7 +465,7 @@ private Statement ParseSimpleStmt() {
/*
small_stmt: expr_stmt | del_stmt | pass_stmt | flow_stmt | import_stmt | global_stmt | nonlocal_stmt | assert_stmt
-
+
del_stmt: 'del' exprlist
pass_stmt: 'pass'
flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
@@ -557,7 +557,7 @@ private Statement FinishSmallStmt(Statement stmt) {
// yield_stmt: yield_expr
private Statement ParseYieldStmt() {
- // For yield statements, continue to enforce that it's currently in a function.
+ // For yield statements, continue to enforce that it's currently in a function.
// This gives us better syntax error reporting for yield-statements than for yield-expressions.
FunctionDefinition current = CurrentFunction;
if (current == null) {
@@ -577,7 +577,7 @@ private Statement ParseYieldStmt() {
///
/// Peek if the next token is a 'yield' and parse a yield expression. Else return null.
- ///
+ ///
/// Called w/ yield already eaten.
///
/// A yield expression if present, else null.
@@ -589,7 +589,7 @@ private Expression ParseYieldExpression() {
// Mark that this function is actually a generator.
// If we're in a generator expression, then we don't have a function yet.
// g=((yield i) for i in range(5))
- // In that acse, the genexp will mark IsGenerator.
+ // In that case, the genexp will mark IsGenerator.
FunctionDefinition current = CurrentFunction;
if (current != null) {
current.IsGenerator = true;
@@ -761,7 +761,7 @@ private PythonOperator GetBinaryOperator(OperatorToken token) {
}
}
- // import_stmt: 'import' module ['as' name"] (',' module ['as' name])*
+ // import_stmt: 'import' module ['as' name] (',' module ['as' name])*
// name: identifier
private ImportStatement ParseImportStmt() {
Eat(TokenKind.KeywordImport);
@@ -834,8 +834,8 @@ private string[] ReadNames() {
// 'from' relative_module 'import' identifier ['as' name] (',' identifier ['as' name]) *
- // 'from' relative_module 'import' '(' identifier ['as' name] (',' identifier ['as' name])* [','] ')'
- // 'from' module 'import' "*"
+ // 'from' relative_module 'import' '(' identifier ['as' name] (',' identifier ['as' name])* [','] ')'
+ // 'from' module 'import' "*"
private FromImportStatement ParseFromImportStmt() {
Eat(TokenKind.KeywordFrom);
var start = GetStart();
@@ -1336,7 +1336,7 @@ private Expression FinishLambdef() {
return ParseLambdaHelperEnd(func, expr);
}
- // Helpers for parsing lambda expressions.
+ // Helpers for parsing lambda expressions.
// Usage
// FunctionDefinition f = ParseLambdaHelperStart(string);
// Expression expr = ParseXYZ();
@@ -1357,7 +1357,7 @@ private FunctionDefinition ParseLambdaHelperStart(string name) {
}
private Expression ParseLambdaHelperEnd(FunctionDefinition func, Expression expr) {
- // Pep 342 in Python 2.5 allows Yield Expressions, which can occur inside a Lambda body.
+ // Pep 342 in Python 2.5 allows Yield Expressions, which can occur inside a Lambda body.
// In this case, the lambda is a generator and will yield it's final result instead of just return it.
Statement body;
if (func.IsGenerator) {
@@ -1424,7 +1424,6 @@ private WithStatement ParseWithStmt() {
items.Add(ParseWithItem());
}
-
var header = GetEnd();
Statement body = ParseSuite();
if (items is not null) {
@@ -1456,7 +1455,7 @@ private WithItem ParseWithItem() {
}
// async_stmt: 'async' (funcdef | with_stmt | for_stmt)
- private Statement ParseAsyncStmt() {
+ private Statement ParseAsyncStmt(bool onlyAllowDef = false) {
Eat(TokenKind.KeywordAsync);
var start = GetStart();
@@ -1464,8 +1463,10 @@ private Statement ParseAsyncStmt() {
case TokenKind.KeywordDef:
return ParseFuncDef(true);
case TokenKind.KeywordWith:
+ if (onlyAllowDef) goto default;
return ParseAsyncWithStmt(start);
case TokenKind.KeywordFor:
+ if (onlyAllowDef) goto default;
return ParseAsyncForStmt(start);
default:
ReportSyntaxError("invalid syntax");
@@ -1491,7 +1492,6 @@ private AsyncWithStatement ParseAsyncWithStmt(int asyncStart) {
items.Add(ParseWithItem());
}
-
var header = GetEnd();
Statement body = ParseSuite();
if (items is not null) {
@@ -2642,7 +2642,7 @@ private Expression ParseGeneratorExpression(Expression expr) {
// Generator Expressions have an implicit function definition and yield around their expression.
// (x for i in R)
// becomes:
- // def f():
+ // def f():
// for i in R: yield (x)
ExpressionStatement ys = new ExpressionStatement(new YieldExpression(expr));
ys.Expression.SetLoc(_globalParent, expr.IndexSpan);
@@ -3151,13 +3151,13 @@ private PythonAst ParseFileWorker(bool makeModule, bool returnValue) {
List l = new List();
//
- // A future statement must appear near the top of the module.
- // The only lines that can appear before a future statement are:
- // - the module docstring (if any),
- // - comments,
- // - blank lines, and
- // - other future statements.
- //
+ // A future statement must appear near the top of the module.
+ // The only lines that can appear before a future statement are:
+ // - the module docstring (if any),
+ // - comments,
+ // - blank lines, and
+ // - other future statements.
+ //
MaybeEatNewLine();
@@ -3231,6 +3231,12 @@ private Statement InternalParseInteractiveInput(out bool parsingMultiLineCmpdStm
}
return null;
+ case TokenKind.KeywordAsync:
+ parsingMultiLineCmpdStmt = true;
+ s = ParseAsyncStmt(onlyAllowDef: true);
+ EatEndOfInput();
+ break;
+
case TokenKind.KeywordIf:
case TokenKind.KeywordWhile:
case TokenKind.KeywordFor:
@@ -3268,11 +3274,11 @@ private Expression ParseTestListAsExpression() {
///
/// Maybe eats a new line token returning true if the token was
/// eaten.
- ///
- /// Python always tokenizes to have only 1 new line character in a
- /// row. But we also craete NLToken's and ignore them except for
- /// error reporting purposes. This gives us the same errors as
- /// CPython and also matches the behavior of the standard library
+ ///
+ /// Python always tokenizes to have only 1 new line character in a
+ /// row. But we also create NLTokens and ignore them except for
+ /// error reporting purposes. This gives us the same errors as
+ /// CPython and also matches the behavior of the standard library
/// tokenize module. This function eats any present NL tokens and throws
/// them away.
///
@@ -3285,12 +3291,12 @@ private bool MaybeEatNewLine() {
}
///
- /// Eats a new line token throwing if the next token isn't a new line.
- ///
- /// Python always tokenizes to have only 1 new line character in a
- /// row. But we also craete NLToken's and ignore them except for
- /// error reporting purposes. This gives us the same errors as
- /// CPython and also matches the behavior of the standard library
+ /// Eats a new line token throwing if the next token isn't a new line.
+ ///
+ /// Python always tokenizes to have only 1 new line character in a
+ /// row. But we also create NLTokens and ignore them except for
+ /// error reporting purposes. This gives us the same errors as
+ /// CPython and also matches the behavior of the standard library
/// tokenize module. This function eats any present NL tokens and throws
/// them away.
///
@@ -3316,7 +3322,7 @@ private Token EatEndOfInput() {
if (_sourceReader.BaseReader is StreamReader sr && sr.BaseStream.CanSeek) {
// TODO: Convert exception index to proper SourceLocation
}
- // BUG: We have some weird stream and we can't accurately track the
+ // BUG: We have some weird stream and we can't accurately track the
// position where the exception came from. There are too many levels
// of buffering below us to re-wind and calculate the actual line number, so
// we'll give the last line number the tokenizer was at.