add support for parsing parameterless lambdas

Dave Holoway
2020-06-16 15:42:48 +01:00
parent 8b71037a58
commit 9ac6086bad
2 changed files with 29 additions and 2 deletions
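
In Java, a parameterless lambda is an empty argument list followed by the arrow operator: '() -> expr' or '() -> { ... }'. Before this change the tokenizer lexed '->' as two tokens ('-' then '>'), and the expression parser treated an opening bracket only as the start of a bracketed expression or cast. The diff adds a LambdaExpression node, a parameterless-lambda branch in rootTerm, and a dedicated lambda-operator token. Two Java fragments of the kind the parser should now accept (expression bodies only; block bodies are still stubbed out, see the todo below):

    Runnable r = () -> doWork();
    Supplier<String> s = () -> "done";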


@@ -522,6 +522,18 @@ class BracketedExpression extends Expression {
         this.expression = expression;
     }
 }
+class LambdaExpression extends Expression {
+    /**
+     *
+     * @param {*[]} params
+     * @param {Expression|Block} body
+     */
+    constructor(params, body) {
+        super();
+        this.params = params;
+        this.body = body;
+    }
+}
 class IncDecExpression extends Expression {
     /**
      * @param {ResolvedIdent} expr
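
The new node carries the parameter list (always empty in this commit) and the body, which is either a parsed Expression or null when the body was a skipped block. A hypothetical inspection helper, not part of the commit, illustrating the node's shape (it assumes Expression instances expose .source, as the second hunk does):

    function describeLambda(expr) {
        if (expr instanceof LambdaExpression) {
            const body = expr.body ? expr.body.source : '<block body, not yet parsed>';
            return `lambda with ${expr.params.length} param(s), body: ${body}`;
        }
        return 'not a lambda';
    }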
@@ -1674,6 +1686,19 @@ function rootTerm(tokens, mdecls, scope, imports, typemap) {
             return newTerm(tokens, mdecls, scope, imports, typemap);
         case 'open-bracket':
             tokens.inc();
+            if (tokens.isValue(')')) {
+                // parameterless lambda
+                tokens.expectValue('->');
+                let ident, lambdaBody = null;
+                if (tokens.current.value === '{') {
+                    // todo - parse lambda body
+                    skipBody(tokens);
+                } else {
+                    lambdaBody = expression(tokens, mdecls, scope, imports, typemap);
+                    ident = `() -> ${lambdaBody.source}`;
+                }
+                return new ResolvedIdent(ident, [new LambdaExpression([], lambdaBody)]);
+            }
             matches = expression(tokens, mdecls, scope, imports, typemap);
             tokens.expectValue(')');
             if (isCastExpression(tokens.current, matches)) {
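
For input such as '() -> 42', the new branch runs roughly as follows (a trace sketch inferred from the calls above):

    // after 'open-bracket' is consumed by tokens.inc(), the stream holds: ')' '->' '42'
    // 1. tokens.isValue(')') matches and consumes the close bracket
    // 2. tokens.expectValue('->') consumes the arrow
    // 3. the next token is not '{', so expression() parses '42' into lambdaBody
    // 4. ident becomes "() -> 42" via the template string
    // 5. result: new ResolvedIdent("() -> 42", [new LambdaExpression([], lambdaBody)])

A block body ('{ ... }') is currently skipped via skipBody(), leaving lambdaBody null and ident undefined until the todo is resolved. The tokenizer changes below are what make '->' arrive as a single token in the first place.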


@@ -90,7 +90,7 @@ class Token extends TextBlock {
  */
 function tokenize(source, offset = 0, length = source.length) {
     const text = source.slice(offset, offset + length);
-    const raw_token_re = /(\s+|\/\/.*|\/\*[\d\D]*?\*\/|\/\*[\d\D]*)|("[^\r\n\\"]*(?:\\.[^\r\n\\"]*)*"|".*)|('\\u[\da-fA-F]{0,4}'?|'\\?.?'?)|(\.?\d)|([\p{L}\p{N}$_]+)|(\()|([;,?:(){}\[\]@]|\.(?:\.\.)?)|([!=/%*^]=?|<<?=?|>>?>?=?|&[&=]?|\|[|=]?|(\+\+|--)|[+-]=?|~)|$/gu;
+    const raw_token_re = /(\s+|\/\/.*|\/\*[\d\D]*?\*\/|\/\*[\d\D]*)|("[^\r\n\\"]*(?:\\.[^\r\n\\"]*)*"|".*)|('\\u[\da-fA-F]{0,4}'?|'\\?.?'?)|(\.?\d)|([\p{L}\p{N}$_]+)|(\()|([;,?:(){}\[\]@]|\.(?:\.\.)?)|([!=/%*^]=?|<<?=?|>>?>?=?|&[&=]?|\|[|=]?|(\+\+|--)|->|[+-]=?|~)|$/gu;
     const raw_token_types = [
         'wsc',
         'string-literal',
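
The only change to raw_token_re is the new '->' alternative, and its position matters: it must be tried before '[+-]=?', otherwise '-' would match first and '>' would be lexed separately. A reduced sketch of the ordering (not the full regex):

    const before = /(\+\+|--)|[+-]=?|>/g;
    const after  = /(\+\+|--)|->|[+-]=?|>/g;
    'a -> b'.match(before);  // [ '-', '>' ]  arrow split into two tokens
    'a -> b'.match(after);   // [ '->' ]      one lambda-operator token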
@@ -209,7 +209,7 @@ function tokenize(source, offset = 0, length = source.length) {
  * [~!] unary
  * ```
  */
-const operator_re = /^(?:(=|[/%*&|^+-]=|>>>?=|<<=)|(\+\+|--)|([!=]=)|([<>]=?)|([&|^])|(<<|>>>?)|(&&|[|][|])|([*%/])|([+-])|([~!]))$/;
+const operator_re = /^(?:(=|[/%*&|^+-]=|>>>?=|<<=)|(\+\+|--)|([!=]=)|([<>]=?)|([&|^])|(<<|>>>?)|(&&|[|][|])|([*%/])|(->)|([+-])|([~!]))$/;
 /**
  * @typedef {
 'assignment-operator'|
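
operator_re is anchored and dedicates one capture group to each operator kind; the new (->) group sits ninth, between muldiv and plumin, mirroring where 'lambda-operator' lands in the two lists below. A hypothetical classifier, assuming the index of the first defined capture group selects the entry in operator_token_types:

    function operatorKind(value) {
        const m = operator_re.exec(value);
        if (!m) return null;
        const idx = m.slice(1).findIndex(g => g !== undefined);
        return operator_token_types[idx];
    }
    operatorKind('->');  // 'lambda-operator'
    operatorKind('+');   // 'plumin-operator'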
@@ -220,6 +220,7 @@ const operator_re = /^(?:(=|[/%*&|^+-]=|>>>?=|<<=)|(\+\+|--)|([!=]=)|([<>]=?)|([
 'shift-operator'|
 'logical-operator'|
 'muldiv-operator'|
+'lambda-operator'|
 'plumin-operator'|
 'unary-operator'} OperatorKind
 */
@@ -233,6 +234,7 @@ const operator_token_types = [
     'shift-operator',
     'logical-operator',
     'muldiv-operator',
+    'lambda-operator',
     'plumin-operator',
     'unary-operator',
 ]
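
All three structures have to stay in step: the capture groups in operator_re, the OperatorKind typedef, and the operator_token_types array each gain the lambda entry at the same position. A consistency check one could add (a sketch; it assumes operator_token_types holds exactly one entry per capture group, which only its visible tail confirms here):

    // count capture groups by matching the empty string against 're|'
    const groups = new RegExp(operator_re.source + '|').exec('').length - 1;
    console.assert(groups === operator_token_types.length,
        'operator_re capture groups must match operator_token_types entries');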