From 9ac6086bad61aae32cd5353b5f4ad3af5dc1ee3e Mon Sep 17 00:00:00 2001
From: Dave Holoway
Date: Tue, 16 Jun 2020 15:42:48 +0100
Subject: [PATCH] add support for parsing parameterless lambdas

---
 langserver/java/body-parser3.js | 25 +++++++++++++++++++++++++
 langserver/java/tokenizer.js    |  6 ++++--
 2 files changed, 29 insertions(+), 2 deletions(-)

diff --git a/langserver/java/body-parser3.js b/langserver/java/body-parser3.js
index f144942..58f1885 100644
--- a/langserver/java/body-parser3.js
+++ b/langserver/java/body-parser3.js
@@ -522,6 +522,18 @@ class BracketedExpression extends Expression {
         this.expression = expression;
     }
 }
+class LambdaExpression extends Expression {
+    /**
+     *
+     * @param {*[]} params
+     * @param {Expression|Block} body
+     */
+    constructor(params, body) {
+        super();
+        this.params = params;
+        this.body = body;
+    }
+}
 class IncDecExpression extends Expression {
     /**
      * @param {ResolvedIdent} expr
@@ -1674,6 +1686,19 @@ function rootTerm(tokens, mdecls, scope, imports, typemap) {
             return newTerm(tokens, mdecls, scope, imports, typemap);
         case 'open-bracket':
             tokens.inc();
+            if (tokens.isValue(')')) {
+                // parameterless lambda
+                tokens.expectValue('->');
+                let ident, lambdaBody = null;
+                if (tokens.current.value === '{') {
+                    // todo - parse lambda body
+                    skipBody(tokens);
+                } else {
+                    lambdaBody = expression(tokens, mdecls, scope, imports, typemap);
+                    ident = `() -> ${lambdaBody.source}`;
+                }
+                return new ResolvedIdent(ident, [new LambdaExpression([], lambdaBody)]);
+            }
             matches = expression(tokens, mdecls, scope, imports, typemap);
             tokens.expectValue(')');
             if (isCastExpression(tokens.current, matches)) {
diff --git a/langserver/java/tokenizer.js b/langserver/java/tokenizer.js
index e43a405..3e65ee4 100644
--- a/langserver/java/tokenizer.js
+++ b/langserver/java/tokenizer.js
@@ -90,7 +90,7 @@ class Token extends TextBlock {
  */
 function tokenize(source, offset = 0, length = source.length) {
     const text = source.slice(offset, offset + length);
-    const raw_token_re = /(\s+|\/\/.*|\/\*[\d\D]*?\*\/|\/\*[\d\D]*)|("[^\r\n\\"]*(?:\\.[^\r\n\\"]*)*"|".*)|('\\u[\da-fA-F]{0,4}'?|'\\?.?'?)|(\.?\d)|([\p{L}\p{N}$_]+)|(\()|([;,?:(){}\[\]@]|\.(?:\.\.)?)|([!=/%*^]=?|<>?>?=?|&[&=]?|\|[|=]?|(\+\+|--)|[+-]=?|~)|$/gu;
+    const raw_token_re = /(\s+|\/\/.*|\/\*[\d\D]*?\*\/|\/\*[\d\D]*)|("[^\r\n\\"]*(?:\\.[^\r\n\\"]*)*"|".*)|('\\u[\da-fA-F]{0,4}'?|'\\?.?'?)|(\.?\d)|([\p{L}\p{N}$_]+)|(\()|([;,?:(){}\[\]@]|\.(?:\.\.)?)|([!=/%*^]=?|<>?>?=?|&[&=]?|\|[|=]?|(\+\+|--)|->|[+-]=?|~)|$/gu;
     const raw_token_types = [
         'wsc',
         'string-literal',
@@ -209,7 +209,7 @@
 * [~!] unary
 * ```
 */
-const operator_re = /^(?:(=|[/%*&|^+-]=|>>>?=|<<=)|(\+\+|--)|([!=]=)|([<>]=?)|([&|^])|(<<|>>>?)|(&&|[|][|])|([*%/])|([+-])|([~!]))$/;
+const operator_re = /^(?:(=|[/%*&|^+-]=|>>>?=|<<=)|(\+\+|--)|([!=]=)|([<>]=?)|([&|^])|(<<|>>>?)|(&&|[|][|])|([*%/])|(->)|([+-])|([~!]))$/;

 /**
  * @typedef { 'assignment-operator'|
@@ -220,6 +220,7 @@ const operator_re = /^(?:(=|[/%*&|^+-]=|>>>?=|<<=)|(\+\+|--)|([!=]=)|([<>]=?)|([
   'shift-operator'|
   'logical-operator'|
   'muldiv-operator'|
+  'lambda-operator'|
   'plumin-operator'|
   'unary-operator'} OperatorKind
 */
@@ -233,6 +234,7 @@ const operator_token_types = [
     'shift-operator',
     'logical-operator',
     'muldiv-operator',
+    'lambda-operator',
     'plumin-operator',
     'unary-operator',
 ]