Skip to content
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
76 changes: 41 additions & 35 deletions src/core/IronPython/Compiler/Parser.cs
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ public class Parser : IDisposable { // TODO: remove IDisposable
private SourceUnit _sourceUnit;

/// <summary>
/// Language features initialized on parser construction and possibly updated during parsing.
/// Language features initialized on parser construction and possibly updated during parsing.
/// The code can set the language features (e.g. "from __future__ import division").
/// </summary>
private ModuleOptions _languageFeatures;
Expand Down Expand Up @@ -376,7 +376,7 @@ internal void ReportSyntaxError(int start, int end, string message, int errorCod
Severity.FatalError);
}

#endregion
#endregion

#region LL(1) Parsing

Expand All @@ -403,7 +403,7 @@ private string ReadName() {
}

//stmt: simple_stmt | compound_stmt
//compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt
//compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt
private Statement ParseStmt() {
switch (PeekToken().Kind) {
case TokenKind.KeywordIf:
Expand Down Expand Up @@ -465,7 +465,7 @@ private Statement ParseSimpleStmt() {

/*
small_stmt: expr_stmt | del_stmt | pass_stmt | flow_stmt | import_stmt | global_stmt | nonlocal_stmt | assert_stmt

del_stmt: 'del' exprlist
pass_stmt: 'pass'
flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
Expand Down Expand Up @@ -557,7 +557,7 @@ private Statement FinishSmallStmt(Statement stmt) {

// yield_stmt: yield_expr
private Statement ParseYieldStmt() {
// For yield statements, continue to enforce that it's currently in a function.
// For yield statements, continue to enforce that it's currently in a function.
// This gives us better syntax error reporting for yield-statements than for yield-expressions.
FunctionDefinition current = CurrentFunction;
if (current == null) {
Expand All @@ -577,7 +577,7 @@ private Statement ParseYieldStmt() {

/// <summary>
/// Peek if the next token is a 'yield' and parse a yield expression. Else return null.
///
///
/// Called w/ yield already eaten.
/// </summary>
/// <returns>A yield expression if present, else null. </returns>
Expand All @@ -589,7 +589,7 @@ private Expression ParseYieldExpression() {
// Mark that this function is actually a generator.
// If we're in a generator expression, then we don't have a function yet.
// g=((yield i) for i in range(5))
// In that case, the genexp will mark IsGenerator.
// In that case, the genexp will mark IsGenerator.
FunctionDefinition current = CurrentFunction;
if (current != null) {
current.IsGenerator = true;
Expand Down Expand Up @@ -761,7 +761,7 @@ private PythonOperator GetBinaryOperator(OperatorToken token) {
}
}

// import_stmt: 'import' module ['as' name] (',' module ['as' name])*
// import_stmt: 'import' module ['as' name] (',' module ['as' name])*
// name: identifier
private ImportStatement ParseImportStmt() {
Eat(TokenKind.KeywordImport);
Expand Down Expand Up @@ -834,8 +834,8 @@ private string[] ReadNames() {


// 'from' relative_module 'import' identifier ['as' name] (',' identifier ['as' name]) *
// 'from' relative_module 'import' '(' identifier ['as' name] (',' identifier ['as' name])* [','] ')'
// 'from' module 'import' "*"
// 'from' relative_module 'import' '(' identifier ['as' name] (',' identifier ['as' name])* [','] ')'
// 'from' module 'import' "*"
private FromImportStatement ParseFromImportStmt() {
Eat(TokenKind.KeywordFrom);
var start = GetStart();
Expand Down Expand Up @@ -1336,7 +1336,7 @@ private Expression FinishLambdef() {
return ParseLambdaHelperEnd(func, expr);
}

// Helpers for parsing lambda expressions.
// Helpers for parsing lambda expressions.
// Usage
// FunctionDefinition f = ParseLambdaHelperStart(string);
// Expression expr = ParseXYZ();
Expand All @@ -1357,7 +1357,7 @@ private FunctionDefinition ParseLambdaHelperStart(string name) {
}

private Expression ParseLambdaHelperEnd(FunctionDefinition func, Expression expr) {
// Pep 342 in Python 2.5 allows Yield Expressions, which can occur inside a Lambda body.
// Pep 342 in Python 2.5 allows Yield Expressions, which can occur inside a Lambda body.
// In this case, the lambda is a generator and will yield its final result instead of just returning it.
Statement body;
if (func.IsGenerator) {
Expand Down Expand Up @@ -1424,7 +1424,6 @@ private WithStatement ParseWithStmt() {
items.Add(ParseWithItem());
}


var header = GetEnd();
Statement body = ParseSuite();
if (items is not null) {
Expand Down Expand Up @@ -1456,16 +1455,18 @@ private WithItem ParseWithItem() {
}

// async_stmt: 'async' (funcdef | with_stmt | for_stmt)
private Statement ParseAsyncStmt() {
private Statement ParseAsyncStmt(bool onlyAllowDef = false) {
Eat(TokenKind.KeywordAsync);
var start = GetStart();

switch (PeekToken().Kind) {
case TokenKind.KeywordDef:
return ParseFuncDef(true);
case TokenKind.KeywordWith:
if (onlyAllowDef) goto default;
return ParseAsyncWithStmt(start);
case TokenKind.KeywordFor:
if (onlyAllowDef) goto default;
return ParseAsyncForStmt(start);
default:
ReportSyntaxError("invalid syntax");
Expand All @@ -1491,7 +1492,6 @@ private AsyncWithStatement ParseAsyncWithStmt(int asyncStart) {
items.Add(ParseWithItem());
}


var header = GetEnd();
Statement body = ParseSuite();
if (items is not null) {
Expand Down Expand Up @@ -2642,7 +2642,7 @@ private Expression ParseGeneratorExpression(Expression expr) {
// Generator Expressions have an implicit function definition and yield around their expression.
// (x for i in R)
// becomes:
// def f():
// def f():
// for i in R: yield (x)
ExpressionStatement ys = new ExpressionStatement(new YieldExpression(expr));
ys.Expression.SetLoc(_globalParent, expr.IndexSpan);
Expand Down Expand Up @@ -3151,13 +3151,13 @@ private PythonAst ParseFileWorker(bool makeModule, bool returnValue) {
List<Statement> l = new List<Statement>();

//
// A future statement must appear near the top of the module.
// The only lines that can appear before a future statement are:
// - the module docstring (if any),
// - comments,
// - blank lines, and
// - other future statements.
//
// A future statement must appear near the top of the module.
// The only lines that can appear before a future statement are:
// - the module docstring (if any),
// - comments,
// - blank lines, and
// - other future statements.
//

MaybeEatNewLine();

Expand Down Expand Up @@ -3231,6 +3231,12 @@ private Statement InternalParseInteractiveInput(out bool parsingMultiLineCmpdStm
}
return null;

case TokenKind.KeywordAsync:
parsingMultiLineCmpdStmt = true;
s = ParseAsyncStmt(onlyAllowDef: true);
EatEndOfInput();
break;

case TokenKind.KeywordIf:
case TokenKind.KeywordWhile:
case TokenKind.KeywordFor:
Expand Down Expand Up @@ -3268,11 +3274,11 @@ private Expression ParseTestListAsExpression() {
/// <summary>
/// Maybe eats a new line token returning true if the token was
/// eaten.
///
/// Python always tokenizes to have only 1 new line character in a
/// row. But we also create NLTokens and ignore them except for
/// error reporting purposes. This gives us the same errors as
/// CPython and also matches the behavior of the standard library
///
/// Python always tokenizes to have only 1 new line character in a
/// row. But we also create NLTokens and ignore them except for
/// error reporting purposes. This gives us the same errors as
/// CPython and also matches the behavior of the standard library
/// tokenize module. This function eats any present NL tokens and throws
/// them away.
/// </summary>
Expand All @@ -3285,12 +3291,12 @@ private bool MaybeEatNewLine() {
}

/// <summary>
/// Eats a new line token throwing if the next token isn't a new line.
///
/// Python always tokenizes to have only 1 new line character in a
/// row. But we also create NLTokens and ignore them except for
/// error reporting purposes. This gives us the same errors as
/// CPython and also matches the behavior of the standard library
/// Eats a new line token throwing if the next token isn't a new line.
///
/// Python always tokenizes to have only 1 new line character in a
/// row. But we also create NLTokens and ignore them except for
/// error reporting purposes. This gives us the same errors as
/// CPython and also matches the behavior of the standard library
/// tokenize module. This function eats any present NL tokens and throws
/// them away.
/// </summary>
Expand All @@ -3316,7 +3322,7 @@ private Token EatEndOfInput() {
if (_sourceReader.BaseReader is StreamReader sr && sr.BaseStream.CanSeek) {
// TODO: Convert exception index to proper SourceLocation
}
// BUG: We have some weird stream and we can't accurately track the
// BUG: We have some weird stream and we can't accurately track the
// position where the exception came from. There are too many levels
// of buffering below us to re-wind and calculate the actual line number, so
// we'll give the last line number the tokenizer was at.
Expand Down