revert: fix(core): parse incorrect ML open tag as text (#29328)

Matias Niemelä 2019-03-19 11:12:32 -07:00
parent 4605df83e1
commit a3ec058f6b
2 changed files with 3 additions and 45 deletions

src/ml_parser/lexer.ts

@@ -461,14 +461,12 @@ class _Tokenizer {
     let tagName: string;
     let prefix: string;
     let openTagToken: Token|undefined;
-    let tokensBeforeTagOpen = this.tokens.length;
     const innerStart = this._cursor.clone();
     try {
       if (!chars.isAsciiLetter(this._cursor.peek())) {
         throw this._createError(
             _unexpectedCharacterErrorMsg(this._cursor.peek()), this._cursor.getSpan(start));
       }
       openTagToken = this._consumeTagOpenStart(start);
       prefix = openTagToken.parts[0];
       tagName = openTagToken.parts[1];
@@ -485,10 +483,10 @@ class _Tokenizer {
       this._consumeTagOpenEnd();
     } catch (e) {
       if (e instanceof _ControlFlowError) {
-        // When the start tag is invalid (including invalid "attributes"), assume we want a "<"
+        // When the start tag is invalid, assume we want a "<"
        this._cursor = innerStart;
         if (openTagToken) {
-          this.tokens.length = tokensBeforeTagOpen;
+          this.tokens.pop();
         }
         // Back to back text tokens are merged at the end
         this._beginToken(TokenType.TEXT, start);
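Side note on the rollback change in the hunk above (illustration only, not part of the commit): assigning to `tokens.length` truncates the array back to a snapshot taken before the open tag, so every token emitted for the invalid tag is discarded, while the restored `pop()` removes only the single most recent token. A minimal standalone sketch of the difference, using made-up token names:

// Standalone TypeScript sketch; the token names below are hypothetical.
const tokens: string[] = ['TEXT'];
const tokensBeforeTagOpen = tokens.length;  // snapshot taken before consuming the open tag

// Tokens emitted while consuming an open tag that later turns out to be invalid.
tokens.push('TAG_OPEN_START', 'ATTR_NAME', 'ATTR_VALUE');

// Behaviour being reverted: truncate to the snapshot, dropping all three tag tokens.
// tokens.length = tokensBeforeTagOpen;     // tokens -> ['TEXT']

// Behaviour being restored: pop() drops only the last emitted token.
tokens.pop();                               // tokens -> ['TEXT', 'TAG_OPEN_START', 'ATTR_NAME']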
@@ -530,10 +528,6 @@ class _Tokenizer {
   }

   private _consumeAttributeName() {
-    const attrNameStart = this._cursor.peek();
-    if (attrNameStart === chars.$SQ || attrNameStart === chars.$DQ) {
-      throw this._createError(_unexpectedCharacterErrorMsg(attrNameStart), this._cursor.getSpan());
-    }
     this._beginToken(TokenType.ATTR_NAME);
     const prefixAndName = this._consumePrefixAndName();
     this._endToken(prefixAndName);
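For context on the guard removed from `_consumeAttributeName` above (a sketch under assumptions, not the lexer's actual API): the check peeked at the next character code and raised a parse error when it was a single or double quote, so a quote could never be consumed as the start of an attribute name. A self-contained version of that check might look like:

// Minimal standalone sketch of the removed check; the helper name is hypothetical.
const $DQ = 34;  // '"'
const $SQ = 39;  // "'"

function assertNotQuoteAtAttrNameStart(charCode: number): void {
  if (charCode === $SQ || charCode === $DQ) {
    // The removed code threw a parse error here instead of consuming the quote as a name.
    throw new Error(`Unexpected character "${String.fromCharCode(charCode)}"`);
  }
}

assertNotQuoteAtAttrNameStart('a'.charCodeAt(0));     // passes: a letter may start an attribute name
// assertNotQuoteAtAttrNameStart('"'.charCodeAt(0));  // would throw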

test/ml_parser/lexer_spec.ts

@@ -7,6 +7,7 @@
  */
 import {getHtmlTagDefinition} from '../../src/ml_parser/html_tags';
+import {InterpolationConfig} from '../../src/ml_parser/interpolation_config';
 import * as lex from '../../src/ml_parser/lexer';
 import {ParseLocation, ParseSourceFile, ParseSourceSpan} from '../../src/parse_util';
@@ -377,18 +378,6 @@ import {ParseLocation, ParseSourceFile, ParseSourceSpan} from '../../src/parse_util';
         ]);
       });
-      it('should report missing closing single quote', () => {
-        expect(tokenizeAndHumanizeErrors('<t a=\'b>')).toEqual([
-          [lex.TokenType.ATTR_VALUE, 'Unexpected character "EOF"', '0:8'],
-        ]);
-      });
-      it('should report missing closing double quote', () => {
-        expect(tokenizeAndHumanizeErrors('<t a="b>')).toEqual([
-          [lex.TokenType.ATTR_VALUE, 'Unexpected character "EOF"', '0:8'],
-        ]);
-      });
     });

     describe('closing tags', () => {
@@ -563,31 +552,6 @@ import {ParseLocation, ParseSourceFile, ParseSourceSpan} from '../../src/parse_util';
         ]);
       });
-      it('should parse start tags quotes in place of an attribute name as text', () => {
-        expect(tokenizeAndHumanizeParts('<t ">')).toEqual([
-          [lex.TokenType.TEXT, '<t ">'],
-          [lex.TokenType.EOF],
-        ]);
-        expect(tokenizeAndHumanizeParts('<t \'>')).toEqual([
-          [lex.TokenType.TEXT, '<t \'>'],
-          [lex.TokenType.EOF],
-        ]);
-      });
-      it('should parse start tags quotes in place of an attribute name (after a valid attribute) as text',
-         () => {
-           expect(tokenizeAndHumanizeParts('<t a="b" ">')).toEqual([
-             [lex.TokenType.TEXT, '<t a="b" ">'],
-             [lex.TokenType.EOF],
-           ]);
-           expect(tokenizeAndHumanizeParts('<t a=\'b\' \'>')).toEqual([
-             [lex.TokenType.TEXT, '<t a=\'b\' \'>'],
-             [lex.TokenType.EOF],
-           ]);
-         });
       it('should be able to escape {', () => {
         expect(tokenizeAndHumanizeParts('{{ "{" }}')).toEqual([
           [lex.TokenType.TEXT, '{{ "{" }}'],
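The quote-in-place-of-an-attribute-name tests removed in the hunk above pinned down the behaviour this revert takes away: when a quote sits where an attribute name should start, the pre-revert lexer fell back to emitting the whole start tag as plain text. A rough, illustration-only sketch of that fallback idea (hypothetical names, not the Angular lexer):

// Illustration-only sketch of the "treat an invalid start tag as text" recovery.
type SimpleToken = {type: 'TEXT' | 'TAG_OPEN' | 'EOF'; parts: string[]};

function tokenizeStartTag(input: string): SimpleToken[] {
  // Greatly simplified validity rule: every attribute must start with a letter.
  const validStartTag = /^<\w+(\s+[a-zA-Z][^\s=>]*(=('[^']*'|"[^"]*"))?)*\s*>$/.test(input);
  if (validStartTag) {
    return [{type: 'TAG_OPEN', parts: [input]}, {type: 'EOF', parts: []}];
  }
  // A quote where an attribute name should start makes the tag invalid,
  // so the whole source span comes back as a single TEXT token.
  return [{type: 'TEXT', parts: [input]}, {type: 'EOF', parts: []}];
}

console.log(tokenizeStartTag('<t ">'));        // TEXT '<t ">' then EOF, as the removed tests expected
console.log(tokenizeStartTag('<t a="b" ">'));  // TEXT again: the stray quote invalidates the whole tag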