fix(core): parse incorrect ML open tag as text (#29328)

This PR aligns the markup language lexer with the previous behaviour in version 7.x:
https://stackblitz.com/edit/angular-iancj2

While this behaviour is not perfect (we should be giving users an error message
here about invalid HTML instead of assuming a text node) this is probably the best we
can do without a more substantial re-write of the lexing / parsing infrastructure.

This PR just fixes #29231 and restores VE behaviour - a more elaborate fix will
be done in a separate PR as it requires non-trivial rewrites.

PR Close #29328
This commit is contained in:
Pawel Kozlowski 2019-03-15 11:56:42 +01:00 committed by Matias Niemelä
parent c0ad9e104d
commit 4605df83e1
2 changed files with 45 additions and 3 deletions

View File

@ -461,12 +461,14 @@ class _Tokenizer {
let tagName: string; let tagName: string;
let prefix: string; let prefix: string;
let openTagToken: Token|undefined; let openTagToken: Token|undefined;
let tokensBeforeTagOpen = this.tokens.length;
const innerStart = this._cursor.clone(); const innerStart = this._cursor.clone();
try { try {
if (!chars.isAsciiLetter(this._cursor.peek())) { if (!chars.isAsciiLetter(this._cursor.peek())) {
throw this._createError( throw this._createError(
_unexpectedCharacterErrorMsg(this._cursor.peek()), this._cursor.getSpan(start)); _unexpectedCharacterErrorMsg(this._cursor.peek()), this._cursor.getSpan(start));
} }
openTagToken = this._consumeTagOpenStart(start); openTagToken = this._consumeTagOpenStart(start);
prefix = openTagToken.parts[0]; prefix = openTagToken.parts[0];
tagName = openTagToken.parts[1]; tagName = openTagToken.parts[1];
@ -483,10 +485,10 @@ class _Tokenizer {
this._consumeTagOpenEnd(); this._consumeTagOpenEnd();
} catch (e) { } catch (e) {
if (e instanceof _ControlFlowError) { if (e instanceof _ControlFlowError) {
// When the start tag is invalid, assume we want a "<" // When the start tag is invalid (including invalid "attributes"), assume we want a "<"
this._cursor = innerStart; this._cursor = innerStart;
if (openTagToken) { if (openTagToken) {
this.tokens.pop(); this.tokens.length = tokensBeforeTagOpen;
} }
// Back to back text tokens are merged at the end // Back to back text tokens are merged at the end
this._beginToken(TokenType.TEXT, start); this._beginToken(TokenType.TEXT, start);
@ -528,6 +530,10 @@ class _Tokenizer {
} }
private _consumeAttributeName() { private _consumeAttributeName() {
const attrNameStart = this._cursor.peek();
if (attrNameStart === chars.$SQ || attrNameStart === chars.$DQ) {
throw this._createError(_unexpectedCharacterErrorMsg(attrNameStart), this._cursor.getSpan());
}
this._beginToken(TokenType.ATTR_NAME); this._beginToken(TokenType.ATTR_NAME);
const prefixAndName = this._consumePrefixAndName(); const prefixAndName = this._consumePrefixAndName();
this._endToken(prefixAndName); this._endToken(prefixAndName);

View File

@ -7,7 +7,6 @@
*/ */
import {getHtmlTagDefinition} from '../../src/ml_parser/html_tags'; import {getHtmlTagDefinition} from '../../src/ml_parser/html_tags';
import {InterpolationConfig} from '../../src/ml_parser/interpolation_config';
import * as lex from '../../src/ml_parser/lexer'; import * as lex from '../../src/ml_parser/lexer';
import {ParseLocation, ParseSourceFile, ParseSourceSpan} from '../../src/parse_util'; import {ParseLocation, ParseSourceFile, ParseSourceSpan} from '../../src/parse_util';
@ -378,6 +377,18 @@ import {ParseLocation, ParseSourceFile, ParseSourceSpan} from '../../src/parse_u
]); ]);
}); });
it('should report missing closing single quote', () => {
expect(tokenizeAndHumanizeErrors('<t a=\'b>')).toEqual([
[lex.TokenType.ATTR_VALUE, 'Unexpected character "EOF"', '0:8'],
]);
});
it('should report missing closing double quote', () => {
expect(tokenizeAndHumanizeErrors('<t a="b>')).toEqual([
[lex.TokenType.ATTR_VALUE, 'Unexpected character "EOF"', '0:8'],
]);
});
}); });
describe('closing tags', () => { describe('closing tags', () => {
@ -552,6 +563,31 @@ import {ParseLocation, ParseSourceFile, ParseSourceSpan} from '../../src/parse_u
]); ]);
}); });
it('should parse start tags quotes in place of an attribute name as text', () => {
expect(tokenizeAndHumanizeParts('<t ">')).toEqual([
[lex.TokenType.TEXT, '<t ">'],
[lex.TokenType.EOF],
]);
expect(tokenizeAndHumanizeParts('<t \'>')).toEqual([
[lex.TokenType.TEXT, '<t \'>'],
[lex.TokenType.EOF],
]);
});
it('should parse start tags quotes in place of an attribute name (after a valid attribute) as text',
() => {
expect(tokenizeAndHumanizeParts('<t a="b" ">')).toEqual([
[lex.TokenType.TEXT, '<t a="b" ">'],
[lex.TokenType.EOF],
]);
expect(tokenizeAndHumanizeParts('<t a=\'b\' \'>')).toEqual([
[lex.TokenType.TEXT, '<t a=\'b\' \'>'],
[lex.TokenType.EOF],
]);
});
it('should be able to escape {', () => { it('should be able to escape {', () => {
expect(tokenizeAndHumanizeParts('{{ "{" }}')).toEqual([ expect(tokenizeAndHumanizeParts('{{ "{" }}')).toEqual([
[lex.TokenType.TEXT, '{{ "{" }}'], [lex.TokenType.TEXT, '{{ "{" }}'],