diff --git a/packages/compiler-cli/test/compliance/test_cases/source_mapping/external_templates/escaped_chars_partial.js b/packages/compiler-cli/test/compliance/test_cases/source_mapping/external_templates/escaped_chars_partial.js index 9fd906166a..3dd08974b4 100644 --- a/packages/compiler-cli/test/compliance/test_cases/source_mapping/external_templates/escaped_chars_partial.js +++ b/packages/compiler-cli/test/compliance/test_cases/source_mapping/external_templates/escaped_chars_partial.js @@ -1,6 +1,6 @@ .ɵɵelementStart(0, "div") // SOURCE: "/escaped_chars.html" "
" +"\uFFFD#2\uFFFD" // SOURCE: "/i18n_message_element_whitespace.ts" "
\\n " … -}:START_PARAGRAPH: in-p ${ // SOURCE: "/i18n_message_element_whitespace.ts" "\\n in-p\\n " +}:START_PARAGRAPH: in-p ${ // SOURCE: "/i18n_message_element_whitespace.ts" "in-p\\n " … "\uFFFD/#2\uFFFD" // SOURCE: "/i18n_message_element_whitespace.ts" "
" … -}:CLOSE_PARAGRAPH: post-p\n` // SOURCE: "/i18n_message_element_whitespace.ts" "\\n post-p\\n" +}:CLOSE_PARAGRAPH: post-p\n` // SOURCE: "/i18n_message_element_whitespace.ts" "post-p\\n" … i0.ɵɵelementStart(0, "div") // SOURCE: "/i18n_message_element_whitespace.ts" "" +i0.ɵɵelement(2, "p") // SOURCE: "/i18n_message_element_whitespace.ts" "
\\n " … i0.ɵɵi18nEnd() // SOURCE: "/i18n_message_element_whitespace.ts" "
" +"\uFFFD#2\uFFFD" // SOURCE: "/i18n_message_element_whitespace.ts" "
\\n " … -}:START_PARAGRAPH: in-p ${ // SOURCE: "/i18n_message_element_whitespace.ts" "\\n in-p\\n " +}:START_PARAGRAPH: in-p ${ // SOURCE: "/i18n_message_element_whitespace.ts" "in-p\\n " … -"\uFFFD/#2\uFFFD" // SOURCE: "/i18n_message_element_whitespace.ts" "
" +"\uFFFD/#2\uFFFD" // SOURCE: "/i18n_message_element_whitespace.ts" "\\n " … -}:CLOSE_PARAGRAPH: post-p\n` // SOURCE: "/i18n_message_element_whitespace.ts" "\\n post-p\\n" +}:CLOSE_PARAGRAPH: post-p\n` // SOURCE: "/i18n_message_element_whitespace.ts" "post-p\\n" … -.ɵɵelementStart(0, "div") // SOURCE: "/i18n_message_element_whitespace.ts" "" +.ɵɵelement(2, "p") // SOURCE: "/i18n_message_element_whitespace.ts" "
\\n " … .ɵɵi18nEnd() // SOURCE: "/i18n_message_element_whitespace.ts" "
', + source: '
\n ', generated: '"\\uFFFD#2\\uFFFD"', }); expectMapping(mappings, { sourceUrl: '../test.ts', - source: '\n in-p\n ', + source: 'in-p\n ', generated: '}:START_PARAGRAPH: in-p ${', }); expectMapping(mappings, { sourceUrl: '../test.ts', - source: '
', + source: '\n ', generated: '"\\uFFFD/#2\\uFFFD"', }); expectMapping(mappings, { sourceUrl: '../test.ts', - source: '\n post-p\n', + source: 'post-p\n', generated: '}:CLOSE_PARAGRAPH: post-p\n`', }); // ivy instructions diff --git a/packages/compiler/src/i18n/i18n_parser.ts b/packages/compiler/src/i18n/i18n_parser.ts index dda395f873..67791e7c7b 100644 --- a/packages/compiler/src/i18n/i18n_parser.ts +++ b/packages/compiler/src/i18n/i18n_parser.ts @@ -7,11 +7,10 @@ */ import {Lexer as ExpressionLexer} from '../expression_parser/lexer'; -import {Parser as ExpressionParser} from '../expression_parser/parser'; +import {InterpolationPiece, Parser as ExpressionParser} from '../expression_parser/parser'; import * as html from '../ml_parser/ast'; import {getHtmlTagDefinition} from '../ml_parser/html_tags'; import {InterpolationConfig} from '../ml_parser/interpolation_config'; -import {Token, TokenType} from '../ml_parser/lexer'; import {ParseSourceSpan} from '../parse_util'; import * as i18n from './i18n_ast'; @@ -106,18 +105,13 @@ class _I18nVisitor implements html.Visitor { } visitAttribute(attribute: html.Attribute, context: I18nMessageVisitorContext): i18n.Node { - const node = attribute.valueTokens === undefined || attribute.valueTokens.length === 1 ? - new i18n.Text(attribute.value, attribute.valueSpan || attribute.sourceSpan) : - this._visitTextWithInterpolation( - attribute.valueTokens, attribute.valueSpan || attribute.sourceSpan, context, - attribute.i18n); + const node = this._visitTextWithInterpolation( + attribute.value, attribute.valueSpan || attribute.sourceSpan, context, attribute.i18n); return context.visitNodeFn(attribute, node); } visitText(text: html.Text, context: I18nMessageVisitorContext): i18n.Node { - const node = text.tokens.length === 1 ? 
- new i18n.Text(text.value, text.sourceSpan) : - this._visitTextWithInterpolation(text.tokens, text.sourceSpan, context, text.i18n); + const node = this._visitTextWithInterpolation(text.value, text.sourceSpan, context, text.i18n); return context.visitNodeFn(text, node); } @@ -171,36 +165,66 @@ class _I18nVisitor implements html.Visitor { * @param previousI18n Any i18n metadata associated with this `text` from a previous pass. */ private _visitTextWithInterpolation( - tokens: Token[], sourceSpan: ParseSourceSpan, context: I18nMessageVisitorContext, + text: string, sourceSpan: ParseSourceSpan, context: I18nMessageVisitorContext, previousI18n: i18n.I18nMeta|undefined): i18n.Node { + const {strings, expressions} = this._expressionParser.splitInterpolation( + text, sourceSpan.start.toString(), this._interpolationConfig); + + // No expressions, return a single text. + if (expressions.length === 0) { + return new i18n.Text(text, sourceSpan); + } + // Return a sequence of `Text` and `Placeholder` nodes grouped in a `Container`. 
const nodes: i18n.Node[] = []; - for (const token of tokens) { - switch (token.type) { - case TokenType.INTERPOLATION: - case TokenType.ATTR_VALUE_INTERPOLATION: - const expression = token.parts[1]; - const baseName = extractPlaceholderName(expression) || 'INTERPOLATION'; - const phName = context.placeholderRegistry.getPlaceholderName(baseName, expression); - context.placeholderToContent[phName] = { - text: token.parts.join(''), - sourceSpan: token.sourceSpan - }; - nodes.push(new i18n.Placeholder(expression, phName, token.sourceSpan)); - break; - default: - if (token.parts[0].length > 0) { - nodes.push(new i18n.Text(token.parts[0], token.sourceSpan)); - } - break; - } + for (let i = 0; i < strings.length - 1; i++) { + this._addText(nodes, strings[i], sourceSpan); + this._addPlaceholder(nodes, context, expressions[i], sourceSpan); } + // The last index contains no expression + this._addText(nodes, strings[strings.length - 1], sourceSpan); // Whitespace removal may have invalidated the interpolation source-spans. reusePreviousSourceSpans(nodes, previousI18n); return new i18n.Container(nodes, sourceSpan); } + + /** + * Create a new `Text` node from the `textPiece` and add it to the `nodes` collection. + * + * @param nodes The nodes to which the created `Text` node should be added. + * @param textPiece The text and relative span information for this `Text` node. + * @param interpolationSpan The span of the whole interpolated text. + */ + private _addText( + nodes: i18n.Node[], textPiece: InterpolationPiece, interpolationSpan: ParseSourceSpan): void { + if (textPiece.text.length > 0) { + // No need to add empty strings + const stringSpan = getOffsetSourceSpan(interpolationSpan, textPiece); + nodes.push(new i18n.Text(textPiece.text, stringSpan)); + } + } + + /** + * Create a new `Placeholder` node from the `expression` and add it to the `nodes` collection. + * + * @param nodes The nodes to which the created `Text` node should be added. 
+   * @param context The current context of the visitor, used to compute and store placeholders. + * @param expression The expression text and relative span information for this `Placeholder` + * node. + * @param interpolationSpan The span of the whole interpolated text. + */ + private _addPlaceholder( + nodes: i18n.Node[], context: I18nMessageVisitorContext, expression: InterpolationPiece, + interpolationSpan: ParseSourceSpan): void { + const sourceSpan = getOffsetSourceSpan(interpolationSpan, expression); + const baseName = extractPlaceholderName(expression.text) || 'INTERPOLATION'; + const phName = context.placeholderRegistry.getPlaceholderName(baseName, expression.text); + const text = this._interpolationConfig.start + expression.text + this._interpolationConfig.end; + context.placeholderToContent[phName] = {text, sourceSpan}; + nodes.push(new i18n.Placeholder(expression.text, phName, sourceSpan)); + } } /** @@ -223,7 +247,7 @@ function reusePreviousSourceSpans(nodes: i18n.Node[], previousI18n: i18n.I18nMet if (previousI18n instanceof i18n.Container) { // The `previousI18n` is a `Container`, which means that this is a second i18n extraction pass - // after whitespace has been removed from the AST nodes. + // after whitespace has been removed from the AST nodes. assertEquivalentNodes(previousI18n.children, nodes); // Reuse the source-spans from the first pass. @@ -258,6 +282,14 @@ function assertEquivalentNodes(previousNodes: i18n.Node[], nodes: i18n.Node[]): } } +/** + * Create a new `ParseSourceSpan` from the `sourceSpan`, offset by the `start` and `end` values. 
+ */ +function getOffsetSourceSpan( + sourceSpan: ParseSourceSpan, {start, end}: InterpolationPiece): ParseSourceSpan { + return new ParseSourceSpan(sourceSpan.fullStart.moveBy(start), sourceSpan.fullStart.moveBy(end)); +} + const _CUSTOM_PH_EXP = /\/\/[\s\S]*i18n[\s\S]*\([\s\S]*ph[\s\S]*=[\s\S]*("|')([\s\S]*?)\1[\s\S]*\)/g; diff --git a/packages/compiler/src/ml_parser/lexer.ts b/packages/compiler/src/ml_parser/lexer.ts index 3c95825e34..13d3a6bfba 100644 --- a/packages/compiler/src/ml_parser/lexer.ts +++ b/packages/compiler/src/ml_parser/lexer.ts @@ -230,11 +230,8 @@ class _Tokenizer { this._consumeTagOpen(start); } } else if (!(this._tokenizeIcu && this._tokenizeExpansionForm())) { - // In (possibly interpolated) text the end of the text is given by `isTextEnd()`, while - // the premature end of an interpolation is given by the start of a new HTML element. this._consumeWithInterpolation( - TokenType.TEXT, TokenType.INTERPOLATION, () => this._isTextEnd(), - () => this._isTagStart()); + TokenType.TEXT, TokenType.INTERPOLATION, () => this._isTextEnd()); } } catch (e) { this.handleError(e); @@ -611,18 +608,14 @@ class _Tokenizer { if (this._cursor.peek() === chars.$SQ || this._cursor.peek() === chars.$DQ) { const quoteChar = this._cursor.peek(); this._consumeQuote(quoteChar); - // In an attribute then end of the attribute value and the premature end to an interpolation - // are both triggered by the `quoteChar`. 
- const endPredicate = () => this._cursor.peek() === quoteChar; this._consumeWithInterpolation( - TokenType.ATTR_VALUE_TEXT, TokenType.ATTR_VALUE_INTERPOLATION, endPredicate, - endPredicate); + TokenType.ATTR_VALUE_TEXT, TokenType.ATTR_VALUE_INTERPOLATION, + () => this._cursor.peek() === quoteChar); this._consumeQuote(quoteChar); } else { const endPredicate = () => isNameEnd(this._cursor.peek()); this._consumeWithInterpolation( - TokenType.ATTR_VALUE_TEXT, TokenType.ATTR_VALUE_INTERPOLATION, endPredicate, - endPredicate); + TokenType.ATTR_VALUE_TEXT, TokenType.ATTR_VALUE_INTERPOLATION, endPredicate); } } @@ -712,21 +705,15 @@ class _Tokenizer { /** * Consume a string that may contain interpolation expressions. - * * The first token consumed will be of `tokenType` and then there will be alternating * `interpolationTokenType` and `tokenType` tokens until the `endPredicate()` returns true. * - * If an interpolation token ends prematurely it will have no end marker in its `parts` array. - * * @param textTokenType the kind of tokens to interleave around interpolation tokens. * @param interpolationTokenType the kind of tokens that contain interpolation. * @param endPredicate a function that should return true when we should stop consuming. - * @param endInterpolation a function that should return true if there is a premature end to an - * interpolation expression - i.e. before we get to the normal interpolation closing marker. 
*/ private _consumeWithInterpolation( - textTokenType: TokenType, interpolationTokenType: TokenType, endPredicate: () => boolean, - endInterpolation: () => boolean) { + textTokenType: TokenType, interpolationTokenType: TokenType, endPredicate: () => boolean) { this._beginToken(textTokenType); const parts: string[] = []; @@ -735,7 +722,7 @@ class _Tokenizer { if (this._interpolationConfig && this._attemptStr(this._interpolationConfig.start)) { this._endToken([this._processCarriageReturns(parts.join(''))], current); parts.length = 0; - this._consumeInterpolation(interpolationTokenType, current, endInterpolation); + this._consumeInterpolation(interpolationTokenType, current); this._beginToken(textTokenType); } else if (this._cursor.peek() === chars.$AMPERSAND) { this._endToken([this._processCarriageReturns(parts.join(''))]); @@ -754,17 +741,8 @@ class _Tokenizer { this._endToken([this._processCarriageReturns(parts.join(''))]); } - /** - * Consume a block of text that has been interpreted as an Angular interpolation. - * - * @param interpolationTokenType the type of the interpolation token to generate. - * @param interpolationStart a cursor that points to the start of this interpolation. - * @param prematureEndPredicate a function that should return true if the next characters indicate - * an end to the interpolation before its normal closing marker. 
- */ private _consumeInterpolation( - interpolationTokenType: TokenType, interpolationStart: CharacterCursor, - prematureEndPredicate: (() => boolean)|null) { + interpolationTokenType: TokenType, interpolationStart: CharacterCursor) { const parts: string[] = []; this._beginToken(interpolationTokenType, interpolationStart); parts.push(this._interpolationConfig.start); @@ -773,8 +751,7 @@ const expressionStart = this._cursor.clone(); let inQuote: number|null = null; let inComment = false; - while (this._cursor.peek() !== chars.$EOF && - (prematureEndPredicate === null || !prematureEndPredicate())) { + while (this._cursor.peek() !== chars.$EOF) { const current = this._cursor.clone(); if (this._isTagStart()) { @@ -806,7 +783,7 @@ } else if (char === inQuote) { // Exiting the current quoted string inQuote = null; - } else if (!inComment && inQuote === null && chars.isQuote(char)) { + } else if (!inComment && chars.isQuote(char)) { // Entering a new quoted string inQuote = char; } @@ -818,7 +795,7 @@ } private _getProcessedChars(start: CharacterCursor, end: CharacterCursor): string { - return this._processCarriageReturns(end.getChars(start)); + return this._processCarriageReturns(end.getChars(start)); } private _isTextEnd(): boolean { diff --git a/packages/compiler/src/render3/view/i18n/localize_utils.ts b/packages/compiler/src/render3/view/i18n/localize_utils.ts index 7579d3ebb8..85b4178edf 100644 --- a/packages/compiler/src/render3/view/i18n/localize_utils.ts +++ b/packages/compiler/src/render3/view/i18n/localize_utils.ts @@ -35,10 +35,7 @@ class LocalizeSerializerVisitor implements i18n.Visitor { // Two literal pieces in a row means that there was some comment node in-between. 
context[context.length - 1].text += text.value; } else { - const sourceSpan = new ParseSourceSpan( - text.sourceSpan.fullStart, text.sourceSpan.end, text.sourceSpan.fullStart, - text.sourceSpan.details); - context.push(new o.LiteralPiece(text.value, sourceSpan)); + context.push(new o.LiteralPiece(text.value, text.sourceSpan)); } } @@ -93,7 +90,7 @@ function getSourceSpan(message: i18n.Message): ParseSourceSpan { const startNode = message.nodes[0]; const endNode = message.nodes[message.nodes.length - 1]; return new ParseSourceSpan( - startNode.sourceSpan.fullStart, endNode.sourceSpan.end, startNode.sourceSpan.fullStart, + startNode.sourceSpan.start, endNode.sourceSpan.end, startNode.sourceSpan.fullStart, startNode.sourceSpan.details); } diff --git a/packages/compiler/test/ml_parser/lexer_spec.ts b/packages/compiler/test/ml_parser/lexer_spec.ts index c0302a3345..bc8559221b 100644 --- a/packages/compiler/test/ml_parser/lexer_spec.ts +++ b/packages/compiler/test/ml_parser/lexer_spec.ts @@ -316,32 +316,6 @@ import {ParseLocation, ParseSourceFile, ParseSourceSpan} from '../../src/parse_u ]); }); - it('should end interpolation on an unescaped matching quote', () => { - expect(tokenizeAndHumanizeParts('{{ a
')).toEqual([ - [lex.TokenType.TAG_OPEN_START, '', 'p'], - [lex.TokenType.TAG_OPEN_END], - [lex.TokenType.TEXT, ''], - [lex.TokenType.INTERPOLATION, '{{', ' a '], - [lex.TokenType.TEXT, ''], - [lex.TokenType.TAG_CLOSE, '', 'p'], - [lex.TokenType.EOF], - ]); - }); - it('should break out of interpolation in text token on valid CDATA', () => { expect(tokenizeAndHumanizeParts('{{ a }}')).toEqual([ [lex.TokenType.TEXT, ''], diff --git a/packages/compiler/test/render3/view/i18n_spec.ts b/packages/compiler/test/render3/view/i18n_spec.ts index 25a1a9c0d8..170f48ca5d 100644 --- a/packages/compiler/test/render3/view/i18n_spec.ts +++ b/packages/compiler/test/render3/view/i18n_spec.ts @@ -478,7 +478,7 @@ describe('serializeI18nMessageForLocalize', () => { expect(messageParts[3].text).toEqual(''); expect(messageParts[3].sourceSpan.toString()).toEqual(''); expect(messageParts[4].text).toEqual(' D'); - expect(messageParts[4].sourceSpan.toString()).toEqual(' D'); + expect(messageParts[4].sourceSpan.toString()).toEqual('D'); expect(placeHolders[0].text).toEqual('START_TAG_SPAN'); expect(placeHolders[0].sourceSpan.toString()).toEqual('');