diff --git a/packages/compiler-cli/src/ngtsc/annotations/src/component.ts b/packages/compiler-cli/src/ngtsc/annotations/src/component.ts
index 5ea6aec532..93f9061144 100644
--- a/packages/compiler-cli/src/ngtsc/annotations/src/component.ts
+++ b/packages/compiler-cli/src/ngtsc/annotations/src/component.ts
@@ -771,6 +771,9 @@ export class ComponentDecoratorHandler implements
interpolation = InterpolationConfig.fromArray(value as [string, string]);
}
+ // We always normalize line endings if the template has been escaped (i.e. is inline).
+ const i18nNormalizeLineEndingsInICUs = escapedString || this.i18nNormalizeLineEndingsInICUs;
+
const {errors, nodes: emitNodes, styleUrls, styles, ngContentSelectors} =
parseTemplate(templateStr, templateUrl, {
preserveWhitespaces,
@@ -778,7 +781,7 @@ export class ComponentDecoratorHandler implements
range: templateRange,
escapedString,
enableI18nLegacyMessageIdFormat: this.enableI18nLegacyMessageIdFormat,
- i18nNormalizeLineEndingsInICUs: this.i18nNormalizeLineEndingsInICUs,
+ i18nNormalizeLineEndingsInICUs,
});
// Unfortunately, the primary parse of the template above may not contain accurate source map
@@ -800,7 +803,7 @@ export class ComponentDecoratorHandler implements
range: templateRange,
escapedString,
enableI18nLegacyMessageIdFormat: this.enableI18nLegacyMessageIdFormat,
- i18nNormalizeLineEndingsInICUs: this.i18nNormalizeLineEndingsInICUs,
+ i18nNormalizeLineEndingsInICUs,
leadingTriviaChars: [],
});
diff --git a/packages/compiler/src/ml_parser/lexer.ts b/packages/compiler/src/ml_parser/lexer.ts
index 38a82a7d23..c6e7242b9f 100644
--- a/packages/compiler/src/ml_parser/lexer.ts
+++ b/packages/compiler/src/ml_parser/lexer.ts
@@ -642,13 +642,11 @@ class _Tokenizer {
this._beginToken(TokenType.RAW_TEXT);
const condition = this._readUntil(chars.$COMMA);
const normalizedCondition = this._processCarriageReturns(condition);
- if (this._escapedString || this._i18nNormalizeLineEndingsInICUs) {
- // Either the template is inline or,
- // we explicitly want to normalize line endings for this text.
+ if (this._i18nNormalizeLineEndingsInICUs) {
+ // We explicitly want to normalize line endings for this text.
this._endToken([normalizedCondition]);
} else {
- // The expression is in an external template and, for backward compatibility,
- // we are not normalizing line endings.
+ // We are not normalizing line endings; if normalization would change the text, the token is recorded below.
const conditionToken = this._endToken([condition]);
if (normalizedCondition !== condition) {
this.nonNormalizedIcuExpressions.push(conditionToken);
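The tokenizer now keys this branch solely off `_i18nNormalizeLineEndingsInICUs`: when the flag is off, the original ICU condition is kept and, if normalization would have changed it, the token is recorded in `nonNormalizedIcuExpressions`. A self-contained sketch of that logic, assuming `_processCarriageReturns` follows the usual HTML preprocessing rule of folding `\r\n` and lone `\r` into `\n` (the free-standing functions below are stand-ins for the tokenizer's private methods, not its actual API):

```ts
// Stand-in for _processCarriageReturns, assuming the HTML-spec preprocessing rule
// of folding `\r\n` and lone `\r` into `\n`.
const CR_OR_CRLF = /\r\n?/g;
function processCarriageReturns(content: string): string {
  return content.replace(CR_OR_CRLF, '\n');
}

// Stand-in for the branch above: emit the normalized condition only when the flag is set;
// otherwise keep the original text and remember it whenever normalization would change it.
function readIcuCondition(
    condition: string, normalizeLineEndingsInICUs: boolean,
    nonNormalizedIcuExpressions: string[]): string {
  const normalized = processCarriageReturns(condition);
  if (normalizeLineEndingsInICUs) {
    return normalized;
  }
  if (normalized !== condition) {
    nonNormalizedIcuExpressions.push(condition);
  }
  return condition;
}
```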
diff --git a/packages/compiler/test/ml_parser/html_parser_spec.ts b/packages/compiler/test/ml_parser/html_parser_spec.ts
index af167cba94..f6a67701b4 100644
--- a/packages/compiler/test/ml_parser/html_parser_spec.ts
+++ b/packages/compiler/test/ml_parser/html_parser_spec.ts
@@ -332,40 +332,75 @@ import {humanizeDom, humanizeDomSourceSpans, humanizeLineColumn} from './ast_spe
]);
});
- it('should normalize line-endings in expansion forms in inline templates', () => {
- const parsed = parser.parse(
- `<div>\r\n` +
- ` {\r\n` +
- ` messages.length,\r\n` +
- ` plural,\r\n` +
- ` =0 {You have \r\nno\r\n messages}\r\n` +
- ` =1 {One {{message}}}}\r\n` +
- `</div>`,
- 'TestComp', {
- tokenizeExpansionForms: true,
- escapedString: true,
- });
+ it('should normalize line-endings in expansion forms in inline templates if `i18nNormalizeLineEndingsInICUs` is true',
+ () => {
+ const parsed = parser.parse(
+ `<div>\r\n` +
+ ` {\r\n` +
+ ` messages.length,\r\n` +
+ ` plural,\r\n` +
+ ` =0 {You have \r\nno\r\n messages}\r\n` +
+ ` =1 {One {{message}}}}\r\n` +
+ `</div>`,
+ 'TestComp', {
+ tokenizeExpansionForms: true,
+ escapedString: true,
+ i18nNormalizeLineEndingsInICUs: true,
+ });
- expect(humanizeDom(parsed)).toEqual([
- [html.Element, 'div', 0],
- [html.Text, '\n ', 1],
- [html.Expansion, '\n messages.length', 'plural', 1],
- [html.ExpansionCase, '=0', 2],
- [html.ExpansionCase, '=1', 2],
- [html.Text, '\n', 1],
- ]);
- const cases = (parsed.rootNodes[0]).children[1].cases;
+ expect(humanizeDom(parsed)).toEqual([
+ [html.Element, 'div', 0],
+ [html.Text, '\n ', 1],
+ [html.Expansion, '\n messages.length', 'plural', 1],
+ [html.ExpansionCase, '=0', 2],
+ [html.ExpansionCase, '=1', 2],
+ [html.Text, '\n', 1],
+ ]);
+ const cases = (parsed.rootNodes[0]).children[1].cases;
- expect(humanizeDom(new ParseTreeResult(cases[0].expression, []))).toEqual([
- [html.Text, 'You have \nno\n messages', 0],
- ]);
+ expect(humanizeDom(new ParseTreeResult(cases[0].expression, []))).toEqual([
+ [html.Text, 'You have \nno\n messages', 0],
+ ]);
- expect(humanizeDom(new ParseTreeResult(cases[1].expression, []))).toEqual([
- [html.Text, 'One {{message}}', 0]
- ]);
+ expect(humanizeDom(new ParseTreeResult(cases[1].expression, []))).toEqual([
+ [html.Text, 'One {{message}}', 0]
+ ]);
- expect(parsed.errors).toEqual([]);
- });
+ expect(parsed.errors).toEqual([]);
+ });
+
+ it('should not normalize line-endings in ICU expressions in inline templates when `i18nNormalizeLineEndingsInICUs` is not set',
+ () => {
+ const parsed = parser.parse(
+ `<div>\r\n` +
+ ` {\r\n` +
+ ` messages.length,\r\n` +
+ ` plural,\r\n` +
+ ` =0 {You have \r\nno\r\n messages}\r\n` +
+ ` =1 {One {{message}}}}\r\n` +
+ `</div>`,
+ 'TestComp', {tokenizeExpansionForms: true, escapedString: true});
+
+ expect(humanizeDom(parsed)).toEqual([
+ [html.Element, 'div', 0],
+ [html.Text, '\n ', 1],
+ [html.Expansion, '\r\n messages.length', 'plural', 1],
+ [html.ExpansionCase, '=0', 2],
+ [html.ExpansionCase, '=1', 2],
+ [html.Text, '\n', 1],
+ ]);
+ const cases = (parsed.rootNodes[0]).children[1].cases;
+
+ expect(humanizeDom(new ParseTreeResult(cases[0].expression, []))).toEqual([
+ [html.Text, 'You have \nno\n messages', 0],
+ ]);
+
+ expect(humanizeDom(new ParseTreeResult(cases[1].expression, []))).toEqual([
+ [html.Text, 'One {{message}}', 0]
+ ]);
+
+ expect(parsed.errors).toEqual([]);
+ });
it('should normalize line-endings in expansion forms in external templates if `i18nNormalizeLineEndingsInICUs` is true',
() => {
@@ -468,33 +503,67 @@ import {humanizeDom, humanizeDomSourceSpans, humanizeLineColumn} from './ast_spe
]);
});
- it('should normalize line endings in nested expansion forms for inline templates', () => {
- const parsed = parser.parse(
- `{\r\n` +
- ` messages.length, plural,\r\n` +
- ` =0 { zero \r\n` +
- ` {\r\n` +
- ` p.gender, select,\r\n` +
- ` male {m}\r\n` +
- ` }\r\n` +
- ` }\r\n` +
- `}`,
- 'TestComp', {tokenizeExpansionForms: true, escapedString: true});
- expect(humanizeDom(parsed)).toEqual([
- [html.Expansion, '\n messages.length', 'plural', 0],
- [html.ExpansionCase, '=0', 1],
- ]);
+ it('should normalize line endings in nested expansion forms for inline templates, when `i18nNormalizeLineEndingsInICUs` is true',
+ () => {
+ const parsed = parser.parse(
+ `{\r\n` +
+ ` messages.length, plural,\r\n` +
+ ` =0 { zero \r\n` +
+ ` {\r\n` +
+ ` p.gender, select,\r\n` +
+ ` male {m}\r\n` +
+ ` }\r\n` +
+ ` }\r\n` +
+ `}`,
+ 'TestComp', {
+ tokenizeExpansionForms: true,
+ escapedString: true,
+ i18nNormalizeLineEndingsInICUs: true
+ });
+ expect(humanizeDom(parsed)).toEqual([
+ [html.Expansion, '\n messages.length', 'plural', 0],
+ [html.ExpansionCase, '=0', 1],
+ ]);
- const expansion = parsed.rootNodes[0] as html.Expansion;
- expect(humanizeDom(new ParseTreeResult(expansion.cases[0].expression, []))).toEqual([
- [html.Text, 'zero \n ', 0],
- [html.Expansion, '\n p.gender', 'select', 0],
- [html.ExpansionCase, 'male', 1],
- [html.Text, '\n ', 0],
- ]);
+ const expansion = parsed.rootNodes[0] as html.Expansion;
+ expect(humanizeDom(new ParseTreeResult(expansion.cases[0].expression, []))).toEqual([
+ [html.Text, 'zero \n ', 0],
+ [html.Expansion, '\n p.gender', 'select', 0],
+ [html.ExpansionCase, 'male', 1],
+ [html.Text, '\n ', 0],
+ ]);
- expect(parsed.errors).toEqual([]);
- });
+ expect(parsed.errors).toEqual([]);
+ });
+
+ it('should not normalize line endings in nested expansion forms for inline templates, when `i18nNormalizeLineEndingsInICUs` is not defined',
+ () => {
+ const parsed = parser.parse(
+ `{\r\n` +
+ ` messages.length, plural,\r\n` +
+ ` =0 { zero \r\n` +
+ ` {\r\n` +
+ ` p.gender, select,\r\n` +
+ ` male {m}\r\n` +
+ ` }\r\n` +
+ ` }\r\n` +
+ `}`,
+ 'TestComp', {tokenizeExpansionForms: true, escapedString: true});
+ expect(humanizeDom(parsed)).toEqual([
+ [html.Expansion, '\r\n messages.length', 'plural', 0],
+ [html.ExpansionCase, '=0', 1],
+ ]);
+
+ const expansion = parsed.rootNodes[0] as html.Expansion;
+ expect(humanizeDom(new ParseTreeResult(expansion.cases[0].expression, []))).toEqual([
+ [html.Text, 'zero \n ', 0],
+ [html.Expansion, '\r\n p.gender', 'select', 0],
+ [html.ExpansionCase, 'male', 1],
+ [html.Text, '\n ', 0],
+ ]);
+
+ expect(parsed.errors).toEqual([]);
+ });
it('should not normalize line endings in nested expansion forms for external templates, when `i18nNormalizeLineEndingsInICUs` is not set',
() => {
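Taken together, the parser specs above pin down the observable behaviour: with the flag unset, only the ICU switch expression keeps its authored `\r\n`, while the text inside the expansion cases is normalized either way. A condensed restatement (string values as shown in the expectations above; the object shape is illustrative only):

```ts
// String values as shown in the spec expectations above; the object shape is illustrative only.
const icuSwitchExpression = {
  withFlag: '\n messages.length',       // normalized
  withoutFlag: '\r\n messages.length',  // left as authored
};
// Case text, e.g. 'You have \nno\n messages', is normalized in both configurations.
console.assert(icuSwitchExpression.withoutFlag.startsWith('\r\n'));
```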
diff --git a/packages/compiler/test/ml_parser/lexer_spec.ts b/packages/compiler/test/ml_parser/lexer_spec.ts
index 32895b12ea..f2a650a558 100644
--- a/packages/compiler/test/ml_parser/lexer_spec.ts
+++ b/packages/compiler/test/ml_parser/lexer_spec.ts
@@ -885,75 +885,114 @@ import {ParseLocation, ParseSourceFile, ParseSourceSpan} from '../../src/parse_u
describe('[line ending normalization', () => {
describe('{escapedString: true}', () => {
- it('should normalize line-endings in expansion forms', () => {
- const result = tokenizeWithoutErrors(
- `{\r\n` +
- ` messages.length,\r\n` +
- ` plural,\r\n` +
- ` =0 {You have \r\nno\r\n messages}\r\n` +
- ` =1 {One {{message}}}}\r\n`,
- {
- tokenizeExpansionForms: true,
- escapedString: true,
- });
+ it('should normalize line-endings in expansion forms if `i18nNormalizeLineEndingsInICUs` is true',
+ () => {
+ const result = tokenizeWithoutErrors(
+ `{\r\n` +
+ ` messages.length,\r\n` +
+ ` plural,\r\n` +
+ ` =0 {You have \r\nno\r\n messages}\r\n` +
+ ` =1 {One {{message}}}}\r\n`,
+ {
+ tokenizeExpansionForms: true,
+ escapedString: true,
+ i18nNormalizeLineEndingsInICUs: true
+ });
- expect(humanizeParts(result.tokens)).toEqual([
- [lex.TokenType.EXPANSION_FORM_START],
- [lex.TokenType.RAW_TEXT, '\n messages.length'],
- [lex.TokenType.RAW_TEXT, 'plural'],
- [lex.TokenType.EXPANSION_CASE_VALUE, '=0'],
- [lex.TokenType.EXPANSION_CASE_EXP_START],
- [lex.TokenType.TEXT, 'You have \nno\n messages'],
- [lex.TokenType.EXPANSION_CASE_EXP_END],
- [lex.TokenType.EXPANSION_CASE_VALUE, '=1'],
- [lex.TokenType.EXPANSION_CASE_EXP_START],
- [lex.TokenType.TEXT, 'One {{message}}'],
- [lex.TokenType.EXPANSION_CASE_EXP_END],
- [lex.TokenType.EXPANSION_FORM_END],
- [lex.TokenType.TEXT, '\n'],
- [lex.TokenType.EOF],
- ]);
+ expect(humanizeParts(result.tokens)).toEqual([
+ [lex.TokenType.EXPANSION_FORM_START],
+ [lex.TokenType.RAW_TEXT, '\n messages.length'],
+ [lex.TokenType.RAW_TEXT, 'plural'],
+ [lex.TokenType.EXPANSION_CASE_VALUE, '=0'],
+ [lex.TokenType.EXPANSION_CASE_EXP_START],
+ [lex.TokenType.TEXT, 'You have \nno\n messages'],
+ [lex.TokenType.EXPANSION_CASE_EXP_END],
+ [lex.TokenType.EXPANSION_CASE_VALUE, '=1'],
+ [lex.TokenType.EXPANSION_CASE_EXP_START],
+ [lex.TokenType.TEXT, 'One {{message}}'],
+ [lex.TokenType.EXPANSION_CASE_EXP_END],
+ [lex.TokenType.EXPANSION_FORM_END],
+ [lex.TokenType.TEXT, '\n'],
+ [lex.TokenType.EOF],
+ ]);
- expect(result.nonNormalizedIcuExpressions).toEqual([]);
- });
+ expect(result.nonNormalizedIcuExpressions).toEqual([]);
+ });
- it('should normalize line endings in nested expansion forms for inline templates', () => {
- const result = tokenizeWithoutErrors(
- `{\r\n` +
- ` messages.length, plural,\r\n` +
- ` =0 { zero \r\n` +
- ` {\r\n` +
- ` p.gender, select,\r\n` +
- ` male {m}\r\n` +
- ` }\r\n` +
- ` }\r\n` +
- `}`,
- {tokenizeExpansionForms: true, escapedString: true});
- expect(humanizeParts(result.tokens)).toEqual([
- [lex.TokenType.EXPANSION_FORM_START],
- [lex.TokenType.RAW_TEXT, '\n messages.length'],
- [lex.TokenType.RAW_TEXT, 'plural'],
- [lex.TokenType.EXPANSION_CASE_VALUE, '=0'],
- [lex.TokenType.EXPANSION_CASE_EXP_START],
- [lex.TokenType.TEXT, 'zero \n '],
+ it('should not normalize line-endings in ICU expressions when `i18nNormalizeLineEndingsInICUs` is not defined',
+ () => {
+ const result = tokenizeWithoutErrors(
+ `{\r\n` +
+ ` messages.length,\r\n` +
+ ` plural,\r\n` +
+ ` =0 {You have \r\nno\r\n messages}\r\n` +
+ ` =1 {One {{message}}}}\r\n`,
+ {tokenizeExpansionForms: true, escapedString: true});
- [lex.TokenType.EXPANSION_FORM_START],
- [lex.TokenType.RAW_TEXT, '\n p.gender'],
- [lex.TokenType.RAW_TEXT, 'select'],
- [lex.TokenType.EXPANSION_CASE_VALUE, 'male'],
- [lex.TokenType.EXPANSION_CASE_EXP_START],
- [lex.TokenType.TEXT, 'm'],
- [lex.TokenType.EXPANSION_CASE_EXP_END],
- [lex.TokenType.EXPANSION_FORM_END],
+ expect(humanizeParts(result.tokens)).toEqual([
+ [lex.TokenType.EXPANSION_FORM_START],
+ [lex.TokenType.RAW_TEXT, '\r\n messages.length'],
+ [lex.TokenType.RAW_TEXT, 'plural'],
+ [lex.TokenType.EXPANSION_CASE_VALUE, '=0'],
+ [lex.TokenType.EXPANSION_CASE_EXP_START],
+ [lex.TokenType.TEXT, 'You have \nno\n messages'],
+ [lex.TokenType.EXPANSION_CASE_EXP_END],
+ [lex.TokenType.EXPANSION_CASE_VALUE, '=1'],
+ [lex.TokenType.EXPANSION_CASE_EXP_START],
+ [lex.TokenType.TEXT, 'One {{message}}'],
+ [lex.TokenType.EXPANSION_CASE_EXP_END],
+ [lex.TokenType.EXPANSION_FORM_END],
+ [lex.TokenType.TEXT, '\n'],
+ [lex.TokenType.EOF],
+ ]);
- [lex.TokenType.TEXT, '\n '],
- [lex.TokenType.EXPANSION_CASE_EXP_END],
- [lex.TokenType.EXPANSION_FORM_END],
- [lex.TokenType.EOF],
- ]);
+ expect(result.nonNormalizedIcuExpressions!.length).toBe(1);
+ expect(result.nonNormalizedIcuExpressions![0].sourceSpan.toString())
+ .toEqual('\r\n messages.length');
+ });
- expect(result.nonNormalizedIcuExpressions).toEqual([]);
- });
+ it('should not normalize line endings in nested expansion forms when `i18nNormalizeLineEndingsInICUs` is not defined',
+ () => {
+ const result = tokenizeWithoutErrors(
+ `{\r\n` +
+ ` messages.length, plural,\r\n` +
+ ` =0 { zero \r\n` +
+ ` {\r\n` +
+ ` p.gender, select,\r\n` +
+ ` male {m}\r\n` +
+ ` }\r\n` +
+ ` }\r\n` +
+ `}`,
+ {tokenizeExpansionForms: true, escapedString: true});
+ expect(humanizeParts(result.tokens)).toEqual([
+ [lex.TokenType.EXPANSION_FORM_START],
+ [lex.TokenType.RAW_TEXT, '\r\n messages.length'],
+ [lex.TokenType.RAW_TEXT, 'plural'],
+ [lex.TokenType.EXPANSION_CASE_VALUE, '=0'],
+ [lex.TokenType.EXPANSION_CASE_EXP_START],
+ [lex.TokenType.TEXT, 'zero \n '],
+
+ [lex.TokenType.EXPANSION_FORM_START],
+ [lex.TokenType.RAW_TEXT, '\r\n p.gender'],
+ [lex.TokenType.RAW_TEXT, 'select'],
+ [lex.TokenType.EXPANSION_CASE_VALUE, 'male'],
+ [lex.TokenType.EXPANSION_CASE_EXP_START],
+ [lex.TokenType.TEXT, 'm'],
+ [lex.TokenType.EXPANSION_CASE_EXP_END],
+ [lex.TokenType.EXPANSION_FORM_END],
+
+ [lex.TokenType.TEXT, '\n '],
+ [lex.TokenType.EXPANSION_CASE_EXP_END],
+ [lex.TokenType.EXPANSION_FORM_END],
+ [lex.TokenType.EOF],
+ ]);
+
+ expect(result.nonNormalizedIcuExpressions!.length).toBe(2);
+ expect(result.nonNormalizedIcuExpressions![0].sourceSpan.toString())
+ .toEqual('\r\n messages.length');
+ expect(result.nonNormalizedIcuExpressions![1].sourceSpan.toString())
+ .toEqual('\r\n p.gender');
+ });
});
describe('{escapedString: false}', () => {
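For code going through the compiler's `parseTemplate()` entry point (as the ngtsc handler at the top of this diff does), the flag is just another template option. A hedged usage sketch that uses only option names appearing in the call sites above; the template string and file path are made up:

```ts
import {parseTemplate} from '@angular/compiler';

// External template (not escaped): the ICU expression is normalized here only because the
// flag is set; with the flag omitted, the `\r\n` inside the ICU would be preserved.
const source = '{count, plural, =0 {You have \r\nno\r\n messages}}';
const {errors, nodes} = parseTemplate(source, '/app/messages.html', {
  escapedString: false,
  i18nNormalizeLineEndingsInICUs: true,
});
```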