This reverts commit 9b3d4f5575.

PR Close #43033

parent fac6ea5fae
commit 8b6f7ac36b
@@ -11,7 +11,7 @@ import {Parser as ExpressionParser} from '../expression_parser/parser';
 import * as html from '../ml_parser/ast';
 import {getHtmlTagDefinition} from '../ml_parser/html_tags';
 import {InterpolationConfig} from '../ml_parser/interpolation_config';
-import {InterpolatedAttributeToken, InterpolatedTextToken, TokenType} from '../ml_parser/tokens';
+import {Token, TokenType} from '../ml_parser/lexer';
 import {ParseSourceSpan} from '../parse_util';
 
 import * as i18n from './i18n_ast';
 
@@ -163,16 +163,16 @@ class _I18nVisitor implements html.Visitor {
   }
 
   /**
-   * Convert, text and interpolated tokens up into text and placeholder pieces.
+   * Split the, potentially interpolated, text up into text and placeholder pieces.
    *
-   * @param tokens The text and interpolated tokens.
+   * @param text The potentially interpolated string to be split.
    * @param sourceSpan The span of the whole of the `text` string.
    * @param context The current context of the visitor, used to compute and store placeholders.
    * @param previousI18n Any i18n metadata associated with this `text` from a previous pass.
    */
   private _visitTextWithInterpolation(
-      tokens: (InterpolatedTextToken|InterpolatedAttributeToken)[], sourceSpan: ParseSourceSpan,
-      context: I18nMessageVisitorContext, previousI18n: i18n.I18nMeta|undefined): i18n.Node {
+      tokens: Token[], sourceSpan: ParseSourceSpan, context: I18nMessageVisitorContext,
+      previousI18n: i18n.I18nMeta|undefined): i18n.Node {
     // Return a sequence of `Text` and `Placeholder` nodes grouped in a `Container`.
     const nodes: i18n.Node[] = [];
     for (const token of tokens) {
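The doc comment above describes `_visitTextWithInterpolation` as splitting interpolated text into text and placeholder pieces grouped in a container. A toy illustration of that split (not the Angular implementation; `Piece` and `splitInterpolatedText` are hypothetical names):

```ts
// Toy sketch of the text/placeholder split described above; not Angular source.
type Piece = {kind: 'text'; value: string}|{kind: 'placeholder'; expression: string};

function splitInterpolatedText(text: string): Piece[] {
  const pieces: Piece[] = [];
  const re = /{{\s*([^]+?)\s*}}/g;  // default `{{ ... }}` interpolation markers
  let last = 0;
  for (let m = re.exec(text); m !== null; m = re.exec(text)) {
    if (m.index > last) pieces.push({kind: 'text', value: text.slice(last, m.index)});
    pieces.push({kind: 'placeholder', expression: m[1]});
    last = re.lastIndex;
  }
  if (last < text.length) pieces.push({kind: 'text', value: text.slice(last)});
  return pieces;
}

// splitInterpolatedText('Hi {{name}}!') yields
// [{kind: 'text', value: 'Hi '}, {kind: 'placeholder', expression: 'name'},
//  {kind: 'text', value: '!'}]
```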
 
@@ -9,7 +9,7 @@
 import {AstPath} from '../ast_path';
 import {I18nMeta} from '../i18n/i18n_ast';
 import {ParseSourceSpan} from '../parse_util';
-import {InterpolatedAttributeToken, InterpolatedTextToken} from './tokens';
+import {Token} from './lexer';
 
 interface BaseNode {
   sourceSpan: ParseSourceSpan;
@@ -25,8 +25,7 @@ export abstract class NodeWithI18n implements BaseNode {
 
 export class Text extends NodeWithI18n {
   constructor(
-      public value: string, sourceSpan: ParseSourceSpan, public tokens: InterpolatedTextToken[],
-      i18n?: I18nMeta) {
+      public value: string, sourceSpan: ParseSourceSpan, public tokens: Token[], i18n?: I18nMeta) {
     super(sourceSpan, i18n);
   }
   override visit(visitor: Visitor, context: any): any {
@@ -59,7 +58,7 @@ export class Attribute extends NodeWithI18n {
   constructor(
       public name: string, public value: string, sourceSpan: ParseSourceSpan,
       readonly keySpan: ParseSourceSpan|undefined, public valueSpan: ParseSourceSpan|undefined,
-      public valueTokens: InterpolatedAttributeToken[]|undefined, i18n: I18nMeta|undefined) {
+      public valueTokens: Token[]|undefined, i18n: I18nMeta|undefined) {
     super(sourceSpan, i18n);
   }
   override visit(visitor: Visitor, context: any): any {
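The `override visit` methods above are the double-dispatch hook of the HTML AST visitor pattern. A minimal sketch of the mechanism with stand-in types (not the real `Visitor` interface):

```ts
// Minimal double-dispatch sketch; `MiniVisitor` and `MiniText` are stand-ins.
interface MiniVisitor {
  visitText(text: MiniText, context: any): any;
}

class MiniText {
  constructor(public value: string) {}
  visit(visitor: MiniVisitor, context: any): any {
    return visitor.visitText(this, context);  // node forwards itself
  }
}

// Usage: the node picks the matching visitor method at runtime.
const upper = new MiniText('hello').visit(
    {visitText: (t: MiniText) => t.value.toUpperCase()}, null);  // 'HELLO'
```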
 
@@ -8,8 +8,8 @@
 
 import * as html from './ast';
 import {NGSP_UNICODE} from './entities';
+import {Token, TokenType} from './lexer';
 import {ParseTreeResult} from './parser';
-import {TextToken, TokenType} from './tokens';
 
 export const PRESERVE_WS_ATTR_NAME = 'ngPreserveWhitespaces';
 
@@ -77,8 +77,8 @@ export class WhitespaceVisitor implements html.Visitor {
     if (isNotBlank || hasExpansionSibling) {
       // Process the whitespace in the tokens of this Text node
       const tokens = text.tokens.map(
-          token =>
-              token.type === TokenType.TEXT ? createWhitespaceProcessedTextToken(token) : token);
+          token => token.type === TokenType.TEXT ? createTextTokenAfterWhitespaceProcessing(token) :
+                                                   token);
       // Process the whitespace of the value of this Text node
       const value = processWhitespace(text.value);
       return new html.Text(value, text.sourceSpan, tokens, text.i18n);
@@ -100,8 +100,8 @@ export class WhitespaceVisitor implements html.Visitor {
   }
 }
 
-function createWhitespaceProcessedTextToken({type, parts, sourceSpan}: TextToken): TextToken {
-  return {type, parts: [processWhitespace(parts[0])], sourceSpan};
+function createTextTokenAfterWhitespaceProcessing(token: Token): Token {
+  return new Token(token.type, [processWhitespace(token.parts[0])], token.sourceSpan);
 }
 
 function processWhitespace(text: string): string {
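`createTextTokenAfterWhitespaceProcessing` above runs a TEXT token's single `parts` entry through `processWhitespace`. A sketch of that helper's assumed behavior (collapse whitespace runs, treating the `NGSP_UNICODE` marker imported from './entities' as a space); the real implementation may differ:

```ts
// Assumed-behavior sketch, not the actual implementation.
const NGSP_UNICODE = '\uE500';  // value this sketch assumes for the marker

function processWhitespace(text: string): string {
  return text.replace(new RegExp(NGSP_UNICODE, 'g'), ' ')
      .replace(/[ \t\r\n\f]+/g, ' ');
}

// processWhitespace('a \n\t b') === 'a b'
```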
 
@@ -12,7 +12,39 @@ import {NAMED_ENTITIES} from './entities';
 
 import {DEFAULT_INTERPOLATION_CONFIG, InterpolationConfig} from './interpolation_config';
 import {TagContentType, TagDefinition} from './tags';
-import {IncompleteTagOpenToken, TagOpenStartToken, Token, TokenType} from './tokens';
+
+export enum TokenType {
+  TAG_OPEN_START,
+  TAG_OPEN_END,
+  TAG_OPEN_END_VOID,
+  TAG_CLOSE,
+  INCOMPLETE_TAG_OPEN,
+  TEXT,
+  ESCAPABLE_RAW_TEXT,
+  RAW_TEXT,
+  INTERPOLATION,
+  ENCODED_ENTITY,
+  COMMENT_START,
+  COMMENT_END,
+  CDATA_START,
+  CDATA_END,
+  ATTR_NAME,
+  ATTR_QUOTE,
+  ATTR_VALUE_TEXT,
+  ATTR_VALUE_INTERPOLATION,
+  DOC_TYPE,
+  EXPANSION_FORM_START,
+  EXPANSION_CASE_VALUE,
+  EXPANSION_CASE_EXP_START,
+  EXPANSION_CASE_EXP_END,
+  EXPANSION_FORM_END,
+  EOF
+}
+
+export class Token {
+  constructor(
+      public type: TokenType|null, public parts: string[], public sourceSpan: ParseSourceSpan) {}
+}
 
 export class TokenError extends ParseError {
   constructor(errorMsg: string, public tokenType: TokenType|null, span: ParseSourceSpan) {
@@ -258,12 +290,9 @@ class _Tokenizer {
           'Programming error - attempted to end a token which has no token type', null,
           this._cursor.getSpan(this._currentTokenStart));
     }
-    const token = {
-      type: this._currentTokenType,
-      parts,
-      sourceSpan:
-          (end ?? this._cursor).getSpan(this._currentTokenStart, this._leadingTriviaCodePoints),
-    } as Token;
+    const token = new Token(
+        this._currentTokenType, parts,
+        (end ?? this._cursor).getSpan(this._currentTokenStart, this._leadingTriviaCodePoints));
     this.tokens.push(token);
     this._currentTokenStart = null;
     this._currentTokenType = null;
@@ -498,7 +527,7 @@ class _Tokenizer {
   private _consumeTagOpen(start: CharacterCursor) {
     let tagName: string;
     let prefix: string;
-    let openTagToken: TagOpenStartToken|IncompleteTagOpenToken|undefined;
+    let openTagToken: Token|undefined;
     try {
       if (!chars.isAsciiLetter(this._cursor.peek())) {
         throw this._createError(
@@ -561,10 +590,10 @@ class _Tokenizer {
     this._endToken([prefix, tagName]);
   }
 
-  private _consumeTagOpenStart(start: CharacterCursor): TagOpenStartToken {
+  private _consumeTagOpenStart(start: CharacterCursor) {
    this._beginToken(TokenType.TAG_OPEN_START, start);
     const parts = this._consumePrefixAndName();
-    return this._endToken(parts) as TagOpenStartToken;
+    return this._endToken(parts);
   }
 
   private _consumeAttributeName() {
@@ -735,7 +764,7 @@ class _Tokenizer {
    */
   private _consumeInterpolation(
       interpolationTokenType: TokenType, interpolationStart: CharacterCursor,
-      prematureEndPredicate: (() => boolean)|null): void {
+      prematureEndPredicate: (() => boolean)|null) {
     const parts: string[] = [];
     this._beginToken(interpolationTokenType, interpolationStart);
     parts.push(this._interpolationConfig.start);
@@ -754,8 +783,7 @@ class _Tokenizer {
         // (This is actually wrong but here for backward compatibility).
         this._cursor = current;
         parts.push(this._getProcessedChars(expressionStart, current));
-        this._endToken(parts);
-        return;
+        return this._endToken(parts);
       }
 
       if (inQuote === null) {
@@ -763,8 +791,7 @@ class _Tokenizer {
         // We are not in a string, and we hit the end interpolation marker
         parts.push(this._getProcessedChars(expressionStart, current));
         parts.push(this._interpolationConfig.end);
-        this._endToken(parts);
-        return;
+        return this._endToken(parts);
       } else if (this._attemptStr('//')) {
         // Once we are in a comment we ignore any quotes
         inComment = true;
@@ -787,7 +814,7 @@ class _Tokenizer {
 
     // We hit EOF without finding a closing interpolation marker
     parts.push(this._getProcessedChars(expressionStart, this._cursor));
-    this._endToken(parts);
+    return this._endToken(parts);
   }
 
   private _getProcessedChars(start: CharacterCursor, end: CharacterCursor): string {
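The `_endToken` change above is the crux of this file's revert: tokens go back to being instances of the `Token` class instead of object literals asserted to the (now deleted) union type. A side-by-side sketch with stand-in types:

```ts
// Stand-in types; not Angular source.
interface Span { start: number; end: number; }

class Token {
  constructor(public type: number|null, public parts: string[], public sourceSpan: Span) {}
}

// Removed style: build a plain literal and assert its type.
function endTokenLiteral(type: number, parts: string[], span: Span): Token {
  return {type, parts, sourceSpan: span} as Token;
}

// Restored style: construct an instance; no assertion is needed and
// `result instanceof Token` checks work.
function endTokenClass(type: number, parts: string[], span: Span): Token {
  return new Token(type, parts, span);
}
```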
 
@@ -10,9 +10,8 @@ import {ParseError, ParseLocation, ParseSourceSpan} from '../parse_util';
 
 import * as html from './ast';
 import {NAMED_ENTITIES} from './entities';
-import {tokenize, TokenizeOptions} from './lexer';
+import * as lex from './lexer';
 import {getNsPrefix, mergeNsAndName, splitNsName, TagDefinition} from './tags';
-import {AttributeNameToken, AttributeQuoteToken, CdataStartToken, CommentStartToken, ExpansionCaseExpressionEndToken, ExpansionCaseExpressionStartToken, ExpansionCaseValueToken, ExpansionFormStartToken, IncompleteTagOpenToken, InterpolatedAttributeToken, InterpolatedTextToken, TagCloseToken, TagOpenStartToken, TextToken, Token, TokenType} from './tokens';
 
 export class TreeError extends ParseError {
   static create(elementName: string|null, span: ParseSourceSpan, msg: string): TreeError {
@@ -31,8 +30,8 @@ export class ParseTreeResult {
 export class Parser {
   constructor(public getTagDefinition: (tagName: string) => TagDefinition) {}
 
-  parse(source: string, url: string, options?: TokenizeOptions): ParseTreeResult {
-    const tokenizeResult = tokenize(source, url, this.getTagDefinition, options);
+  parse(source: string, url: string, options?: lex.TokenizeOptions): ParseTreeResult {
+    const tokenizeResult = lex.tokenize(source, url, this.getTagDefinition, options);
     const parser = new _TreeBuilder(tokenizeResult.tokens, this.getTagDefinition);
     parser.build();
     return new ParseTreeResult(
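A hedged usage sketch of the `Parser.parse` API shown above, assuming a caller living next to these files in the compiler source tree (the import paths and the `ParseTreeResult` fields are assumptions consistent with this diff, not confirmed by it):

```ts
// Hypothetical caller inside the compiler package; paths are assumptions.
import {getHtmlTagDefinition} from './html_tags';
import {Parser} from './parser';

const parser = new Parser(getHtmlTagDefinition);
const result = parser.parse('<div>Hi {{name}}</div>', '/demo/template.html');
console.log(result.errors.length, result.rootNodes.length);
```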
@@ -44,38 +43,38 @@ export class Parser {
 
 class _TreeBuilder {
   private _index: number = -1;
-  // `_peek` will be initialized by the call to `_advance()` in the constructor.
-  private _peek!: Token;
+  // `_peek` will be initialized by the call to `advance()` in the constructor.
+  private _peek!: lex.Token;
   private _elementStack: html.Element[] = [];
 
   rootNodes: html.Node[] = [];
   errors: TreeError[] = [];
 
   constructor(
-      private tokens: Token[], private getTagDefinition: (tagName: string) => TagDefinition) {
+      private tokens: lex.Token[], private getTagDefinition: (tagName: string) => TagDefinition) {
     this._advance();
   }
 
   build(): void {
-    while (this._peek.type !== TokenType.EOF) {
-      if (this._peek.type === TokenType.TAG_OPEN_START ||
-          this._peek.type === TokenType.INCOMPLETE_TAG_OPEN) {
-        this._consumeStartTag(this._advance<TagOpenStartToken|IncompleteTagOpenToken>());
-      } else if (this._peek.type === TokenType.TAG_CLOSE) {
-        this._consumeEndTag(this._advance<TagCloseToken>());
-      } else if (this._peek.type === TokenType.CDATA_START) {
+    while (this._peek.type !== lex.TokenType.EOF) {
+      if (this._peek.type === lex.TokenType.TAG_OPEN_START ||
+          this._peek.type === lex.TokenType.INCOMPLETE_TAG_OPEN) {
+        this._consumeStartTag(this._advance());
+      } else if (this._peek.type === lex.TokenType.TAG_CLOSE) {
+        this._consumeEndTag(this._advance());
+      } else if (this._peek.type === lex.TokenType.CDATA_START) {
         this._closeVoidElement();
-        this._consumeCdata(this._advance<CdataStartToken>());
-      } else if (this._peek.type === TokenType.COMMENT_START) {
+        this._consumeCdata(this._advance());
+      } else if (this._peek.type === lex.TokenType.COMMENT_START) {
         this._closeVoidElement();
-        this._consumeComment(this._advance<CommentStartToken>());
+        this._consumeComment(this._advance());
       } else if (
-          this._peek.type === TokenType.TEXT || this._peek.type === TokenType.RAW_TEXT ||
-          this._peek.type === TokenType.ESCAPABLE_RAW_TEXT) {
+          this._peek.type === lex.TokenType.TEXT || this._peek.type === lex.TokenType.RAW_TEXT ||
+          this._peek.type === lex.TokenType.ESCAPABLE_RAW_TEXT) {
         this._closeVoidElement();
-        this._consumeText(this._advance<TextToken>());
-      } else if (this._peek.type === TokenType.EXPANSION_FORM_START) {
-        this._consumeExpansion(this._advance<ExpansionFormStartToken>());
+        this._consumeText(this._advance());
+      } else if (this._peek.type === lex.TokenType.EXPANSION_FORM_START) {
+        this._consumeExpansion(this._advance());
       } else {
         // Skip all other tokens...
         this._advance();
@@ -83,50 +82,50 @@ class _TreeBuilder {
       }
     }
   }
 
-  private _advance<T extends Token>(): T {
+  private _advance(): lex.Token {
     const prev = this._peek;
     if (this._index < this.tokens.length - 1) {
       // Note: there is always an EOF token at the end
       this._index++;
     }
     this._peek = this.tokens[this._index];
-    return prev as T;
+    return prev;
   }
 
-  private _advanceIf<T extends TokenType>(type: T): (Token&{type: T})|null {
+  private _advanceIf(type: lex.TokenType): lex.Token|null {
     if (this._peek.type === type) {
-      return this._advance<Token&{type: T}>();
+      return this._advance();
     }
     return null;
   }
 
-  private _consumeCdata(_startToken: CdataStartToken) {
-    this._consumeText(this._advance<TextToken>());
-    this._advanceIf(TokenType.CDATA_END);
+  private _consumeCdata(_startToken: lex.Token) {
+    this._consumeText(this._advance());
+    this._advanceIf(lex.TokenType.CDATA_END);
   }
 
-  private _consumeComment(token: CommentStartToken) {
-    const text = this._advanceIf(TokenType.RAW_TEXT);
-    this._advanceIf(TokenType.COMMENT_END);
+  private _consumeComment(token: lex.Token) {
+    const text = this._advanceIf(lex.TokenType.RAW_TEXT);
+    this._advanceIf(lex.TokenType.COMMENT_END);
     const value = text != null ? text.parts[0].trim() : null;
     this._addToParent(new html.Comment(value, token.sourceSpan));
   }
 
-  private _consumeExpansion(token: ExpansionFormStartToken) {
-    const switchValue = this._advance<TextToken>();
+  private _consumeExpansion(token: lex.Token) {
+    const switchValue = this._advance();
 
-    const type = this._advance<TextToken>();
+    const type = this._advance();
     const cases: html.ExpansionCase[] = [];
 
     // read =
-    while (this._peek.type === TokenType.EXPANSION_CASE_VALUE) {
+    while (this._peek.type === lex.TokenType.EXPANSION_CASE_VALUE) {
       const expCase = this._parseExpansionCase();
       if (!expCase) return;  // error
       cases.push(expCase);
     }
 
     // read the final }
-    if (this._peek.type !== TokenType.EXPANSION_FORM_END) {
+    if (this._peek.type !== lex.TokenType.EXPANSION_FORM_END) {
       this.errors.push(
           TreeError.create(null, this._peek.sourceSpan, `Invalid ICU message. Missing '}'.`));
       return;
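The `_advance`/`_peek` pair above is a single-token-lookahead cursor: `_peek` always holds the next unconsumed token, the constructor primes it with one `_advance()` call, and a trailing EOF token keeps the index in bounds. The same pattern in isolation (toy code, not Angular source):

```ts
// Toy lookahead cursor mirroring _TreeBuilder's advance/peek protocol.
class TokenStream<T> {
  private index = -1;
  private current!: T;  // primed by the advance() call in the constructor

  constructor(private tokens: T[]) {
    this.advance();
  }

  get peek(): T {
    return this.current;
  }

  advance(): T {
    const prev = this.current;
    if (this.index < this.tokens.length - 1) {
      // Mirrors the original: an EOF token is always last, so the cursor
      // parks on it instead of running off the end of the array.
      this.index++;
    }
    this.current = this.tokens[this.index];
    return prev;
  }
}

// const s = new TokenStream(['a', 'b', 'EOF']);
// s.peek === 'a'; s.advance() === 'a'; s.peek === 'b'
```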
@@ -140,23 +139,23 @@ class _TreeBuilder {
   }
 
   private _parseExpansionCase(): html.ExpansionCase|null {
-    const value = this._advance<ExpansionCaseValueToken>();
+    const value = this._advance();
 
     // read {
-    if (this._peek.type !== TokenType.EXPANSION_CASE_EXP_START) {
+    if (this._peek.type !== lex.TokenType.EXPANSION_CASE_EXP_START) {
       this.errors.push(
           TreeError.create(null, this._peek.sourceSpan, `Invalid ICU message. Missing '{'.`));
       return null;
     }
 
     // read until }
-    const start = this._advance<ExpansionCaseExpressionStartToken>();
+    const start = this._advance();
 
     const exp = this._collectExpansionExpTokens(start);
     if (!exp) return null;
 
-    const end = this._advance<ExpansionCaseExpressionEndToken>();
-    exp.push({type: TokenType.EOF, parts: [], sourceSpan: end.sourceSpan});
+    const end = this._advance();
+    exp.push(new lex.Token(lex.TokenType.EOF, [], end.sourceSpan));
 
     // parse everything in between { and }
     const expansionCaseParser = new _TreeBuilder(exp, this.getTagDefinition);
@@ -174,18 +173,18 @@ class _TreeBuilder {
         value.parts[0], expansionCaseParser.rootNodes, sourceSpan, value.sourceSpan, expSourceSpan);
   }
 
-  private _collectExpansionExpTokens(start: Token): Token[]|null {
-    const exp: Token[] = [];
-    const expansionFormStack = [TokenType.EXPANSION_CASE_EXP_START];
+  private _collectExpansionExpTokens(start: lex.Token): lex.Token[]|null {
+    const exp: lex.Token[] = [];
+    const expansionFormStack = [lex.TokenType.EXPANSION_CASE_EXP_START];
 
     while (true) {
-      if (this._peek.type === TokenType.EXPANSION_FORM_START ||
-          this._peek.type === TokenType.EXPANSION_CASE_EXP_START) {
+      if (this._peek.type === lex.TokenType.EXPANSION_FORM_START ||
+          this._peek.type === lex.TokenType.EXPANSION_CASE_EXP_START) {
        expansionFormStack.push(this._peek.type);
      }
 
-      if (this._peek.type === TokenType.EXPANSION_CASE_EXP_END) {
-        if (lastOnStack(expansionFormStack, TokenType.EXPANSION_CASE_EXP_START)) {
+      if (this._peek.type === lex.TokenType.EXPANSION_CASE_EXP_END) {
+        if (lastOnStack(expansionFormStack, lex.TokenType.EXPANSION_CASE_EXP_START)) {
          expansionFormStack.pop();
          if (expansionFormStack.length == 0) return exp;
 
@@ -196,8 +195,8 @@ class _TreeBuilder {
        }
      }
 
-      if (this._peek.type === TokenType.EXPANSION_FORM_END) {
-        if (lastOnStack(expansionFormStack, TokenType.EXPANSION_FORM_START)) {
+      if (this._peek.type === lex.TokenType.EXPANSION_FORM_END) {
+        if (lastOnStack(expansionFormStack, lex.TokenType.EXPANSION_FORM_START)) {
          expansionFormStack.pop();
        } else {
          this.errors.push(
@@ -206,7 +205,7 @@ class _TreeBuilder {
        }
      }
 
-      if (this._peek.type === TokenType.EOF) {
+      if (this._peek.type === lex.TokenType.EOF) {
        this.errors.push(
            TreeError.create(null, start.sourceSpan, `Invalid ICU message. Missing '}'.`));
        return null;
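For reference, the expansion-form syntax that `_consumeExpansion`, `_parseExpansionCase`, and `_collectExpansionExpTokens` handle above is the ICU message format; a representative input (example only, not taken from this diff):

```ts
// Switch value, then type, then one case per EXPANSION_CASE_VALUE token;
// each case body sits between EXPANSION_CASE_EXP_START/_END braces.
const icu = '{messages.length, plural, =0 {none} =1 {one} other {{{messages.length}} messages}}';
```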
@@ -216,7 +215,7 @@ class _TreeBuilder {
     }
   }
 
-  private _consumeText(token: InterpolatedTextToken) {
+  private _consumeText(token: lex.Token) {
     const tokens = [token];
     const startSpan = token.sourceSpan;
     let text = token.parts[0];
@@ -225,21 +224,22 @@ class _TreeBuilder {
      if (parent != null && parent.children.length == 0 &&
          this.getTagDefinition(parent.name).ignoreFirstLf) {
        text = text.substring(1);
-        tokens[0] = {type: token.type, sourceSpan: token.sourceSpan, parts: [text]} as typeof token;
+        tokens[0] = {type: token.type, sourceSpan: token.sourceSpan, parts: [text]};
      }
    }
 
-    while (this._peek.type === TokenType.INTERPOLATION || this._peek.type === TokenType.TEXT ||
-           this._peek.type === TokenType.ENCODED_ENTITY) {
+    while (this._peek.type === lex.TokenType.INTERPOLATION ||
+           this._peek.type === lex.TokenType.TEXT ||
+           this._peek.type === lex.TokenType.ENCODED_ENTITY) {
       token = this._advance();
       tokens.push(token);
-      if (token.type === TokenType.INTERPOLATION) {
+      if (token.type === lex.TokenType.INTERPOLATION) {
         // For backward compatibility we decode HTML entities that appear in interpolation
         // expressions. This is arguably a bug, but it could be a considerable breaking change to
         // fix it. It should be addressed in a larger project to refactor the entire parser/lexer
         // chain after View Engine has been removed.
         text += token.parts.join('').replace(/&([^;]+);/g, decodeEntity);
-      } else if (token.type === TokenType.ENCODED_ENTITY) {
+      } else if (token.type === lex.TokenType.ENCODED_ENTITY) {
         text += token.parts[0];
       } else {
         text += token.parts.join('');
@@ -262,17 +262,17 @@ class _TreeBuilder {
      }
    }
  }
 
-  private _consumeStartTag(startTagToken: TagOpenStartToken|IncompleteTagOpenToken) {
+  private _consumeStartTag(startTagToken: lex.Token) {
     const [prefix, name] = startTagToken.parts;
     const attrs: html.Attribute[] = [];
-    while (this._peek.type === TokenType.ATTR_NAME) {
-      attrs.push(this._consumeAttr(this._advance<AttributeNameToken>()));
+    while (this._peek.type === lex.TokenType.ATTR_NAME) {
+      attrs.push(this._consumeAttr(this._advance()));
     }
     const fullName = this._getElementFullName(prefix, name, this._getParentElement());
     let selfClosing = false;
     // Note: There could have been a tokenizer error
     // so that we don't get a token for the end tag...
-    if (this._peek.type === TokenType.TAG_OPEN_END_VOID) {
+    if (this._peek.type === lex.TokenType.TAG_OPEN_END_VOID) {
       this._advance();
       selfClosing = true;
       const tagDef = this.getTagDefinition(fullName);
@@ -281,7 +281,7 @@ class _TreeBuilder {
            fullName, startTagToken.sourceSpan,
            `Only void and foreign elements can be self closed "${startTagToken.parts[1]}"`));
      }
-    } else if (this._peek.type === TokenType.TAG_OPEN_END) {
+    } else if (this._peek.type === lex.TokenType.TAG_OPEN_END) {
       this._advance();
       selfClosing = false;
     }
@@ -297,7 +297,7 @@ class _TreeBuilder {
       // Elements that are self-closed have their `endSourceSpan` set to the full span, as the
       // element start tag also represents the end tag.
       this._popElement(fullName, span);
-    } else if (startTagToken.type === TokenType.INCOMPLETE_TAG_OPEN) {
+    } else if (startTagToken.type === lex.TokenType.INCOMPLETE_TAG_OPEN) {
       // We already know the opening tag is not complete, so it is unlikely it has a corresponding
       // close tag. Let's optimistically parse it as a full element and emit an error.
       this._popElement(fullName, null);
@@ -317,7 +317,7 @@ class _TreeBuilder {
     this._elementStack.push(el);
   }
 
-  private _consumeEndTag(endTagToken: TagCloseToken) {
+  private _consumeEndTag(endTagToken: lex.Token) {
     const fullName = this._getElementFullName(
         endTagToken.parts[0], endTagToken.parts[1], this._getParentElement());
 
@@ -363,40 +363,35 @@ class _TreeBuilder {
     return false;
   }
 
-  private _consumeAttr(attrName: AttributeNameToken): html.Attribute {
+  private _consumeAttr(attrName: lex.Token): html.Attribute {
     const fullName = mergeNsAndName(attrName.parts[0], attrName.parts[1]);
     let attrEnd = attrName.sourceSpan.end;
 
     // Consume any quote
-    if (this._peek.type === TokenType.ATTR_QUOTE) {
+    if (this._peek.type === lex.TokenType.ATTR_QUOTE) {
       this._advance();
     }
 
     // Consume the attribute value
     let value = '';
-    const valueTokens: InterpolatedAttributeToken[] = [];
+    const valueTokens: lex.Token[] = [];
     let valueStartSpan: ParseSourceSpan|undefined = undefined;
     let valueEnd: ParseLocation|undefined = undefined;
-    // NOTE: We need to use a new variable `nextTokenType` here to hide the actual type of
-    // `_peek.type` from TS. Otherwise TS will narrow the type of `_peek.type` preventing it from
-    // being able to consider `ATTR_VALUE_INTERPOLATION` as an option. This is because TS is not
-    // able to see that `_advance()` will actually mutate `_peek`.
-    const nextTokenType = this._peek.type;
-    if (nextTokenType === TokenType.ATTR_VALUE_TEXT) {
+    if (this._peek.type === lex.TokenType.ATTR_VALUE_TEXT) {
       valueStartSpan = this._peek.sourceSpan;
       valueEnd = this._peek.sourceSpan.end;
-      while (this._peek.type === TokenType.ATTR_VALUE_TEXT ||
-             this._peek.type === TokenType.ATTR_VALUE_INTERPOLATION ||
-             this._peek.type === TokenType.ENCODED_ENTITY) {
-        const valueToken = this._advance<InterpolatedAttributeToken>();
+      while (this._peek.type === lex.TokenType.ATTR_VALUE_TEXT ||
+             this._peek.type === lex.TokenType.ATTR_VALUE_INTERPOLATION ||
+             this._peek.type === lex.TokenType.ENCODED_ENTITY) {
+        const valueToken = this._advance();
         valueTokens.push(valueToken);
-        if (valueToken.type === TokenType.ATTR_VALUE_INTERPOLATION) {
+        if (valueToken.type === lex.TokenType.ATTR_VALUE_INTERPOLATION) {
           // For backward compatibility we decode HTML entities that appear in interpolation
           // expressions. This is arguably a bug, but it could be a considerable breaking change to
          // fix it. It should be addressed in a larger project to refactor the entire parser/lexer
          // chain after View Engine has been removed.
           value += valueToken.parts.join('').replace(/&([^;]+);/g, decodeEntity);
-        } else if (valueToken.type === TokenType.ENCODED_ENTITY) {
+        } else if (valueToken.type === lex.TokenType.ENCODED_ENTITY) {
           value += valueToken.parts[0];
         } else {
           value += valueToken.parts.join('');
@@ -406,8 +401,8 @@ class _TreeBuilder {
     }
 
     // Consume any quote
-    if (this._peek.type === TokenType.ATTR_QUOTE) {
-      const quoteToken = this._advance<AttributeQuoteToken>();
+    if (this._peek.type === lex.TokenType.ATTR_QUOTE) {
+      const quoteToken = this._advance();
       attrEnd = quoteToken.sourceSpan.end;
     }
 
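The removed NOTE comment in `_consumeAttr` documents a real TypeScript control-flow quirk: the checker narrows `this._peek.type` after a comparison and cannot see that `_advance()` mutates `_peek`, so a later comparison against a different member looks impossible to it; copying the type into a local first sidesteps the narrowing. A standalone demonstration (toy types, not Angular source):

```ts
// Toy reproduction of the narrowing issue described in the removed comment.
enum TokenType { ATTR_VALUE_TEXT, ATTR_VALUE_INTERPOLATION }

class Demo {
  private peek: {type: TokenType} = {type: TokenType.ATTR_VALUE_TEXT};

  private advance(): void {
    this.peek = {type: TokenType.ATTR_VALUE_INTERPOLATION};
  }

  run(): void {
    // Narrow a copy, not `this.peek.type` itself: TS does not model the
    // mutation performed by advance(), so narrowing the property directly
    // would make the comparison below look unintentional to the checker.
    const nextTokenType = this.peek.type;
    if (nextTokenType === TokenType.ATTR_VALUE_TEXT) {
      this.advance();
      if (this.peek.type === TokenType.ATTR_VALUE_INTERPOLATION) {
        // Reachable at runtime because advance() replaced `peek`.
      }
    }
  }
}
```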
 
@@ -1,172 +0,0 @@
-/**
- * @license
- * Copyright Google LLC All Rights Reserved.
- *
- * Use of this source code is governed by an MIT-style license that can be
- * found in the LICENSE file at https://angular.io/license
- */
-
-import {ParseSourceSpan} from '../parse_util';
-
-export const enum TokenType {
-  TAG_OPEN_START,
-  TAG_OPEN_END,
-  TAG_OPEN_END_VOID,
-  TAG_CLOSE,
-  INCOMPLETE_TAG_OPEN,
-  TEXT,
-  ESCAPABLE_RAW_TEXT,
-  RAW_TEXT,
-  INTERPOLATION,
-  ENCODED_ENTITY,
-  COMMENT_START,
-  COMMENT_END,
-  CDATA_START,
-  CDATA_END,
-  ATTR_NAME,
-  ATTR_QUOTE,
-  ATTR_VALUE_TEXT,
-  ATTR_VALUE_INTERPOLATION,
-  DOC_TYPE,
-  EXPANSION_FORM_START,
-  EXPANSION_CASE_VALUE,
-  EXPANSION_CASE_EXP_START,
-  EXPANSION_CASE_EXP_END,
-  EXPANSION_FORM_END,
-  EOF
-}
-
-export type Token = TagOpenStartToken|TagOpenEndToken|TagOpenEndVoidToken|TagCloseToken|
-    IncompleteTagOpenToken|TextToken|InterpolationToken|EncodedEntityToken|CommentStartToken|
-    CommentEndToken|CdataStartToken|CdataEndToken|AttributeNameToken|AttributeQuoteToken|
-    AttributeValueTextToken|AttributeValueInterpolationToken|DocTypeToken|ExpansionFormStartToken|
-    ExpansionCaseValueToken|ExpansionCaseExpressionStartToken|ExpansionCaseExpressionEndToken|
-    ExpansionFormEndToken|EndOfFileToken;
-
-export type InterpolatedTextToken = TextToken|InterpolationToken|EncodedEntityToken;
-
-export type InterpolatedAttributeToken =
-    AttributeValueTextToken|AttributeValueInterpolationToken|EncodedEntityToken;
-
-export interface TokenBase {
-  type: TokenType;
-  parts: string[];
-  sourceSpan: ParseSourceSpan;
-}
-
-export interface TagOpenStartToken extends TokenBase {
-  type: TokenType.TAG_OPEN_START;
-  parts: [prefix: string, name: string];
-}
-
-export interface TagOpenEndToken extends TokenBase {
-  type: TokenType.TAG_OPEN_END;
-  parts: [];
-}
-
-export interface TagOpenEndVoidToken extends TokenBase {
-  type: TokenType.TAG_OPEN_END_VOID;
-  parts: [];
-}
-
-export interface TagCloseToken extends TokenBase {
-  type: TokenType.TAG_CLOSE;
-  parts: [prefix: string, name: string];
-}
-
-export interface IncompleteTagOpenToken extends TokenBase {
-  type: TokenType.INCOMPLETE_TAG_OPEN;
-  parts: [prefix: string, name: string];
-}
-
-export interface TextToken extends TokenBase {
-  type: TokenType.TEXT|TokenType.ESCAPABLE_RAW_TEXT|TokenType.RAW_TEXT;
-  parts: [text: string];
-}
-
-export interface InterpolationToken extends TokenBase {
-  type: TokenType.INTERPOLATION;
-  parts: [startMarker: string, expression: string, endMarker: string]|
-      [startMarker: string, expression: string];
-}
-
-export interface EncodedEntityToken extends TokenBase {
-  type: TokenType.ENCODED_ENTITY;
-  parts: [decoded: string, encoded: string];
-}
-
-export interface CommentStartToken extends TokenBase {
-  type: TokenType.COMMENT_START;
-  parts: [];
-}
-
-export interface CommentEndToken extends TokenBase {
-  type: TokenType.COMMENT_END;
-  parts: [];
-}
-
-export interface CdataStartToken extends TokenBase {
-  type: TokenType.CDATA_START;
-  parts: [];
-}
-
-export interface CdataEndToken extends TokenBase {
-  type: TokenType.CDATA_END;
-  parts: [];
-}
-
-export interface AttributeNameToken extends TokenBase {
-  type: TokenType.ATTR_NAME;
-  parts: [prefix: string, name: string];
-}
-
-export interface AttributeQuoteToken extends TokenBase {
-  type: TokenType.ATTR_QUOTE;
-  parts: [quote: '\''|'"'];
-}
-
-export interface AttributeValueTextToken extends TokenBase {
-  type: TokenType.ATTR_VALUE_TEXT;
-  parts: [value: string];
-}
-
-export interface AttributeValueInterpolationToken extends TokenBase {
-  type: TokenType.ATTR_VALUE_INTERPOLATION;
-  parts: [startMarker: string, expression: string, endMarker: string]|
-      [startMarker: string, expression: string];
-}
-
-export interface DocTypeToken extends TokenBase {
-  type: TokenType.DOC_TYPE;
-  parts: [content: string];
-}
-
-export interface ExpansionFormStartToken extends TokenBase {
-  type: TokenType.EXPANSION_FORM_START;
-  parts: [];
-}
-
-export interface ExpansionCaseValueToken extends TokenBase {
-  type: TokenType.EXPANSION_CASE_VALUE;
-  parts: [value: string];
-}
-
-export interface ExpansionCaseExpressionStartToken extends TokenBase {
-  type: TokenType.EXPANSION_CASE_EXP_START;
-  parts: [];
-}
-
-export interface ExpansionCaseExpressionEndToken extends TokenBase {
-  type: TokenType.EXPANSION_CASE_EXP_END;
-  parts: [];
-}
-
-export interface ExpansionFormEndToken extends TokenBase {
-  type: TokenType.EXPANSION_FORM_END;
-  parts: [];
-}
-
-export interface EndOfFileToken extends TokenBase {
-  type: TokenType.EOF;
-  parts: [];
-}
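The deleted tokens.ts above is a textbook discriminated union: narrowing on the shared `type` field gives each token a precisely typed `parts` tuple. A minimal sketch of the pattern this revert gives up, modeled on the deleted file (two members stand in for the full set):

```ts
// Minimal discriminated-union sketch; not the deleted file itself.
enum TokenType { TAG_OPEN_START, TEXT }

interface TagOpenStartToken {
  type: TokenType.TAG_OPEN_START;
  parts: [prefix: string, name: string];
}

interface TextToken {
  type: TokenType.TEXT;
  parts: [text: string];
}

type Token = TagOpenStartToken|TextToken;

function describe(token: Token): string {
  // After the `type` check, TS knows the exact tuple shape of `parts`.
  return token.type === TokenType.TAG_OPEN_START ? `<${token.parts[1]}>` : token.parts[0];
}
```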
 
@@ -8,7 +8,7 @@
 
 import * as html from '../../src/ml_parser/ast';
 import {HtmlParser, ParseTreeResult, TreeError} from '../../src/ml_parser/html_parser';
-import {TokenType} from '../../src/ml_parser/tokens';
+import {TokenType} from '../../src/ml_parser/lexer';
 import {ParseError} from '../../src/parse_util';
 
 import {humanizeDom, humanizeDomSourceSpans, humanizeLineColumn, humanizeNodes} from './ast_spec_utils';
(One file diff suppressed because it is too large.)