diff --git a/packages/compiler/src/ml_parser/lexer.ts b/packages/compiler/src/ml_parser/lexer.ts
index 57d12756d7..713b054e7e 100644
--- a/packages/compiler/src/ml_parser/lexer.ts
+++ b/packages/compiler/src/ml_parser/lexer.ts
@@ -461,12 +461,14 @@ class _Tokenizer {
     let tagName: string;
     let prefix: string;
     let openTagToken: Token|undefined;
+    let tokensBeforeTagOpen = this.tokens.length;
     const innerStart = this._cursor.clone();
     try {
       if (!chars.isAsciiLetter(this._cursor.peek())) {
         throw this._createError(
             _unexpectedCharacterErrorMsg(this._cursor.peek()), this._cursor.getSpan(start));
       }
+
       openTagToken = this._consumeTagOpenStart(start);
       prefix = openTagToken.parts[0];
       tagName = openTagToken.parts[1];
@@ -483,10 +485,10 @@ class _Tokenizer {
       this._consumeTagOpenEnd();
     } catch (e) {
       if (e instanceof _ControlFlowError) {
-        // When the start tag is invalid, assume we want a "<"
+        // When the start tag is invalid (including invalid "attributes"), assume we want a "<"
         this._cursor = innerStart;
         if (openTagToken) {
-          this.tokens.pop();
+          this.tokens.length = tokensBeforeTagOpen;
         }
         // Back to back text tokens are merged at the end
         this._beginToken(TokenType.TEXT, start);
@@ -528,6 +530,10 @@ class _Tokenizer {
   }

   private _consumeAttributeName() {
+    const attrNameStart = this._cursor.peek();
+    if (attrNameStart === chars.$SQ || attrNameStart === chars.$DQ) {
+      throw this._createError(_unexpectedCharacterErrorMsg(attrNameStart), this._cursor.getSpan());
+    }
     this._beginToken(TokenType.ATTR_NAME);
     const prefixAndName = this._consumePrefixAndName();
     this._endToken(prefixAndName);
diff --git a/packages/compiler/test/ml_parser/lexer_spec.ts b/packages/compiler/test/ml_parser/lexer_spec.ts
index 897ff19654..533c90f5d3 100644
--- a/packages/compiler/test/ml_parser/lexer_spec.ts
+++ b/packages/compiler/test/ml_parser/lexer_spec.ts
@@ -7,7 +7,6 @@
  */

 import {getHtmlTagDefinition} from '../../src/ml_parser/html_tags';
-import {InterpolationConfig} from '../../src/ml_parser/interpolation_config';
 import * as lex from '../../src/ml_parser/lexer';
 import {ParseLocation, ParseSourceFile, ParseSourceSpan} from '../../src/parse_util';

@@ -378,6 +377,18 @@ import {ParseLocation, ParseSourceFile, ParseSourceSpan} from '../../src/parse_u
       ]);
     });

+    it('should report missing closing single quote', () => {
+      expect(tokenizeAndHumanizeErrors('<t a=\'b>')).toEqual([
+        [lex.TokenType.ATTR_VALUE, 'Unexpected character "EOF"', '0:8'],
+      ]);
+    });
+
+    it('should report missing closing double quote', () => {
+      expect(tokenizeAndHumanizeErrors('<t a="b>')).toEqual([
+        [lex.TokenType.ATTR_VALUE, 'Unexpected character "EOF"', '0:8'],
+      ]);
+    });
+
     it('...', () => {  // NOTE(review): this and the 2 following context lines were lost in extraction; restore from the upstream Angular commit before applying this hunk
@@ -552,6 +563,31 @@ import {ParseLocation, ParseSourceFile, ParseSourceSpan} from '../../src/parse_u
       ]);
     });

+    it('should parse start tags quotes in place of an attribute name as text', () => {
+      expect(tokenizeAndHumanizeParts('<t ">')).toEqual([
+        [lex.TokenType.TEXT, '<t ">'],
+        [lex.TokenType.EOF],
+      ]);
+
+      expect(tokenizeAndHumanizeParts('<t \'>')).toEqual([
+        [lex.TokenType.TEXT, '<t \'>'],
+        [lex.TokenType.EOF],
+      ]);
+    });
+
+    it('should parse start tags quotes in place of an attribute name (after a valid attribute) as text',
+       () => {
+         expect(tokenizeAndHumanizeParts('<t a="b" ">')).toEqual([
+           [lex.TokenType.TEXT, '<t a="b" ">'],
+           [lex.TokenType.EOF],
+         ]);
+
+         expect(tokenizeAndHumanizeParts('<t a=\'b\' \'>')).toEqual([
+           [lex.TokenType.TEXT, '<t a=\'b\' \'>'],
+           [lex.TokenType.EOF],
+         ]);
+       });
+
     it('should be able to escape {', () => {
       expect(tokenizeAndHumanizeParts('{{ "{" }}')).toEqual([
         [lex.TokenType.TEXT, '{{ "{" }}'],