// Spec file for the compiler's HTML lexer.
import {ddescribe, describe, it, iit, xit, expect, beforeEach, afterEach} from '../../test_lib';
import {BaseException} from '../../src/facade/exceptions';
import {tokenizeHtml, HtmlToken, HtmlTokenType} from '../../src/compiler/html_lexer';
import {ParseSourceSpan, ParseLocation} from '../../src/compiler/parse_util';
export function main() {
|
|
describe('HtmlLexer', () => {
|
|
describe('line/column numbers', () => {
|
|
it('should work without newlines', () => {
|
|
expect(tokenizeAndHumanizeLineColumn('<t>a</t>'))
|
|
.toEqual([
|
|
[HtmlTokenType.TAG_OPEN_START, '0:0'],
|
|
[HtmlTokenType.TAG_OPEN_END, '0:2'],
|
|
[HtmlTokenType.TEXT, '0:3'],
|
|
[HtmlTokenType.TAG_CLOSE, '0:4'],
|
|
[HtmlTokenType.EOF, '0:8']
|
|
]);
|
|
});
|
|
|
|
it('should work with one newline', () => {
|
|
expect(tokenizeAndHumanizeLineColumn('<t>\na</t>'))
|
|
.toEqual([
|
|
[HtmlTokenType.TAG_OPEN_START, '0:0'],
|
|
[HtmlTokenType.TAG_OPEN_END, '0:2'],
|
|
[HtmlTokenType.TEXT, '0:3'],
|
|
[HtmlTokenType.TAG_CLOSE, '1:1'],
|
|
[HtmlTokenType.EOF, '1:5']
|
|
]);
|
|
});
|
|
|
|
it('should work with multiple newlines', () => {
|
|
expect(tokenizeAndHumanizeLineColumn('<t\n>\na</t>'))
|
|
.toEqual([
|
|
[HtmlTokenType.TAG_OPEN_START, '0:0'],
|
|
[HtmlTokenType.TAG_OPEN_END, '1:0'],
|
|
[HtmlTokenType.TEXT, '1:1'],
|
|
[HtmlTokenType.TAG_CLOSE, '2:1'],
|
|
[HtmlTokenType.EOF, '2:5']
|
|
]);
|
|
});
|
|
});
|
|
|
|
describe('comments', () => {
|
|
it('should parse comments', () => {
|
|
expect(tokenizeAndHumanizeParts('<!--test-->'))
|
|
.toEqual([
|
|
[HtmlTokenType.COMMENT_START],
|
|
[HtmlTokenType.RAW_TEXT, 'test'],
|
|
[HtmlTokenType.COMMENT_END],
|
|
[HtmlTokenType.EOF]
|
|
]);
|
|
});
|
|
|
|
it('should store the locations', () => {expect(tokenizeAndHumanizeSourceSpans('<!--test-->'))
|
|
.toEqual([
|
|
[HtmlTokenType.COMMENT_START, '<!--'],
|
|
[HtmlTokenType.RAW_TEXT, 'test'],
|
|
[HtmlTokenType.COMMENT_END, '-->'],
|
|
[HtmlTokenType.EOF, '']
|
|
])});
|
|
|
|
it('should report <!- without -', () => {
|
|
expect(tokenizeAndHumanizeErrors('<!-a'))
|
|
.toEqual([[HtmlTokenType.COMMENT_START, 'Unexpected character "a"', '0:3']]);
|
|
});
|
|
|
|
it('should report missing end comment', () => {
|
|
expect(tokenizeAndHumanizeErrors('<!--'))
|
|
.toEqual([[HtmlTokenType.RAW_TEXT, 'Unexpected character "EOF"', '0:4']]);
|
|
});
|
|
});
|
|
|
|
describe('doctype', () => {
|
|
it('should parse doctypes', () => {
|
|
expect(tokenizeAndHumanizeParts('<!doctype html>'))
|
|
.toEqual([[HtmlTokenType.DOC_TYPE, 'doctype html'], [HtmlTokenType.EOF]]);
|
|
});
|
|
|
|
it('should store the locations', () => {
|
|
expect(tokenizeAndHumanizeSourceSpans('<!doctype html>'))
|
|
.toEqual([[HtmlTokenType.DOC_TYPE, '<!doctype html>'], [HtmlTokenType.EOF, '']]);
|
|
});
|
|
|
|
it('should report missing end doctype', () => {
|
|
expect(tokenizeAndHumanizeErrors('<!'))
|
|
.toEqual([[HtmlTokenType.DOC_TYPE, 'Unexpected character "EOF"', '0:2']]);
|
|
});
|
|
});
|
|
|
|
describe('cdata', () => {
|
|
it('should parse cdata', () => {
|
|
expect(tokenizeAndHumanizeParts('<![cdata[test]]>'))
|
|
.toEqual([
|
|
[HtmlTokenType.CDATA_START],
|
|
[HtmlTokenType.RAW_TEXT, 'test'],
|
|
[HtmlTokenType.CDATA_END],
|
|
[HtmlTokenType.EOF]
|
|
]);
|
|
});
|
|
|
|
it('should store the locations', () => {
|
|
expect(tokenizeAndHumanizeSourceSpans('<![cdata[test]]>'))
|
|
.toEqual([
|
|
[HtmlTokenType.CDATA_START, '<![cdata['],
|
|
[HtmlTokenType.RAW_TEXT, 'test'],
|
|
[HtmlTokenType.CDATA_END, ']]>'],
|
|
[HtmlTokenType.EOF, '']
|
|
]);
|
|
});
|
|
|
|
it('should report <![ without cdata[', () => {
|
|
expect(tokenizeAndHumanizeErrors('<![a'))
|
|
.toEqual([[HtmlTokenType.CDATA_START, 'Unexpected character "a"', '0:3']]);
|
|
});
|
|
|
|
it('should report missing end cdata', () => {
|
|
expect(tokenizeAndHumanizeErrors('<![cdata['))
|
|
.toEqual([[HtmlTokenType.RAW_TEXT, 'Unexpected character "EOF"', '0:9']]);
|
|
});
|
|
});
|
|
|
|
describe('open tags', () => {
|
|
it('should parse open tags without prefix', () => {
|
|
expect(tokenizeAndHumanizeParts('<test>'))
|
|
.toEqual([
|
|
[HtmlTokenType.TAG_OPEN_START, null, 'test'],
|
|
[HtmlTokenType.TAG_OPEN_END],
|
|
[HtmlTokenType.EOF]
|
|
]);
|
|
});
|
|
|
|
it('should parse namespace prefix', () => {
|
|
expect(tokenizeAndHumanizeParts('<ns1:test>'))
|
|
.toEqual([
|
|
[HtmlTokenType.TAG_OPEN_START, 'ns1', 'test'],
|
|
[HtmlTokenType.TAG_OPEN_END],
|
|
[HtmlTokenType.EOF]
|
|
]);
|
|
});
|
|
|
|
it('should parse void tags', () => {
|
|
expect(tokenizeAndHumanizeParts('<test/>'))
|
|
.toEqual([
|
|
[HtmlTokenType.TAG_OPEN_START, null, 'test'],
|
|
[HtmlTokenType.TAG_OPEN_END_VOID],
|
|
[HtmlTokenType.EOF]
|
|
]);
|
|
});
|
|
|
|
it('should allow whitespace', () => {
|
|
expect(tokenizeAndHumanizeParts('< test >'))
|
|
.toEqual([
|
|
[HtmlTokenType.TAG_OPEN_START, null, 'test'],
|
|
[HtmlTokenType.TAG_OPEN_END],
|
|
[HtmlTokenType.EOF]
|
|
]);
|
|
});
|
|
|
|
it('should store the locations', () => {
|
|
expect(tokenizeAndHumanizeSourceSpans('<test>'))
|
|
.toEqual([
|
|
[HtmlTokenType.TAG_OPEN_START, '<test'],
|
|
[HtmlTokenType.TAG_OPEN_END, '>'],
|
|
[HtmlTokenType.EOF, '']
|
|
]);
|
|
});
|
|
|
|
it('should report missing name after <', () => {
|
|
expect(tokenizeAndHumanizeErrors('<'))
|
|
.toEqual([[HtmlTokenType.TAG_OPEN_START, 'Unexpected character "EOF"', '0:1']]);
|
|
});
|
|
|
|
it('should report missing >', () => {
|
|
expect(tokenizeAndHumanizeErrors('<name'))
|
|
.toEqual([[HtmlTokenType.TAG_OPEN_START, 'Unexpected character "EOF"', '0:5']]);
|
|
});
|
|
});
|
|
|
|
describe('attributes', () => {
|
|
it('should parse attributes without prefix', () => {
|
|
expect(tokenizeAndHumanizeParts('<t a>'))
|
|
.toEqual([
|
|
[HtmlTokenType.TAG_OPEN_START, null, 't'],
|
|
[HtmlTokenType.ATTR_NAME, null, 'a'],
|
|
[HtmlTokenType.TAG_OPEN_END],
|
|
[HtmlTokenType.EOF]
|
|
]);
|
|
});
|
|
|
|
it('should parse attributes with prefix', () => {
|
|
expect(tokenizeAndHumanizeParts('<t ns1:a>'))
|
|
.toEqual([
|
|
[HtmlTokenType.TAG_OPEN_START, null, 't'],
|
|
[HtmlTokenType.ATTR_NAME, 'ns1', 'a'],
|
|
[HtmlTokenType.TAG_OPEN_END],
|
|
[HtmlTokenType.EOF]
|
|
]);
|
|
});
|
|
|
|
it('should parse attributes whose prefix is not valid', () => {
|
|
expect(tokenizeAndHumanizeParts('<t (ns1:a)>'))
|
|
.toEqual([
|
|
[HtmlTokenType.TAG_OPEN_START, null, 't'],
|
|
[HtmlTokenType.ATTR_NAME, null, '(ns1:a)'],
|
|
[HtmlTokenType.TAG_OPEN_END],
|
|
[HtmlTokenType.EOF]
|
|
]);
|
|
});
|
|
|
|
it('should parse attributes with single quote value', () => {
|
|
expect(tokenizeAndHumanizeParts("<t a='b'>"))
|
|
.toEqual([
|
|
[HtmlTokenType.TAG_OPEN_START, null, 't'],
|
|
[HtmlTokenType.ATTR_NAME, null, 'a'],
|
|
[HtmlTokenType.ATTR_VALUE, 'b'],
|
|
[HtmlTokenType.TAG_OPEN_END],
|
|
[HtmlTokenType.EOF]
|
|
]);
|
|
});
|
|
|
|
it('should parse attributes with double quote value', () => {
|
|
expect(tokenizeAndHumanizeParts('<t a="b">'))
|
|
.toEqual([
|
|
[HtmlTokenType.TAG_OPEN_START, null, 't'],
|
|
[HtmlTokenType.ATTR_NAME, null, 'a'],
|
|
[HtmlTokenType.ATTR_VALUE, 'b'],
|
|
[HtmlTokenType.TAG_OPEN_END],
|
|
[HtmlTokenType.EOF]
|
|
]);
|
|
});
|
|
|
|
it('should parse attributes with unquoted value', () => {
|
|
expect(tokenizeAndHumanizeParts('<t a=b>'))
|
|
.toEqual([
|
|
[HtmlTokenType.TAG_OPEN_START, null, 't'],
|
|
[HtmlTokenType.ATTR_NAME, null, 'a'],
|
|
[HtmlTokenType.ATTR_VALUE, 'b'],
|
|
[HtmlTokenType.TAG_OPEN_END],
|
|
[HtmlTokenType.EOF]
|
|
]);
|
|
});
|
|
|
|
it('should allow whitespace', () => {
|
|
expect(tokenizeAndHumanizeParts('<t a = b >'))
|
|
.toEqual([
|
|
[HtmlTokenType.TAG_OPEN_START, null, 't'],
|
|
[HtmlTokenType.ATTR_NAME, null, 'a'],
|
|
[HtmlTokenType.ATTR_VALUE, 'b'],
|
|
[HtmlTokenType.TAG_OPEN_END],
|
|
[HtmlTokenType.EOF]
|
|
]);
|
|
});
|
|
|
|
it('should parse attributes with entities in values', () => {
|
|
expect(tokenizeAndHumanizeParts('<t a="A">'))
|
|
.toEqual([
|
|
[HtmlTokenType.TAG_OPEN_START, null, 't'],
|
|
[HtmlTokenType.ATTR_NAME, null, 'a'],
|
|
[HtmlTokenType.ATTR_VALUE, 'A'],
|
|
[HtmlTokenType.TAG_OPEN_END],
|
|
[HtmlTokenType.EOF]
|
|
]);
|
|
});
|
|
|
|
it('should store the locations', () => {
|
|
expect(tokenizeAndHumanizeSourceSpans('<t a=b>'))
|
|
.toEqual([
|
|
[HtmlTokenType.TAG_OPEN_START, '<t'],
|
|
[HtmlTokenType.ATTR_NAME, 'a'],
|
|
[HtmlTokenType.ATTR_VALUE, 'b'],
|
|
[HtmlTokenType.TAG_OPEN_END, '>'],
|
|
[HtmlTokenType.EOF, '']
|
|
]);
|
|
});
|
|
|
|
it('should report missing value after =', () => {
|
|
expect(tokenizeAndHumanizeErrors('<name a='))
|
|
.toEqual([[HtmlTokenType.ATTR_VALUE, 'Unexpected character "EOF"', '0:8']]);
|
|
});
|
|
|
|
it('should report missing end quote for \'', () => {
|
|
expect(tokenizeAndHumanizeErrors('<name a=\''))
|
|
.toEqual([[HtmlTokenType.ATTR_VALUE, 'Unexpected character "EOF"', '0:9']]);
|
|
});
|
|
|
|
it('should report missing end quote for "', () => {
|
|
expect(tokenizeAndHumanizeErrors('<name a="'))
|
|
.toEqual([[HtmlTokenType.ATTR_VALUE, 'Unexpected character "EOF"', '0:9']]);
|
|
});
|
|
});
|
|
|
|
describe('closing tags', () => {
|
|
it('should parse closing tags without prefix', () => {
|
|
expect(tokenizeAndHumanizeParts('</test>'))
|
|
.toEqual([[HtmlTokenType.TAG_CLOSE, null, 'test'], [HtmlTokenType.EOF]]);
|
|
});
|
|
|
|
it('should parse closing tags with prefix', () => {
|
|
expect(tokenizeAndHumanizeParts('</ns1:test>'))
|
|
.toEqual([[HtmlTokenType.TAG_CLOSE, 'ns1', 'test'], [HtmlTokenType.EOF]]);
|
|
});
|
|
|
|
it('should allow whitespace', () => {
|
|
expect(tokenizeAndHumanizeParts('</ test >'))
|
|
.toEqual([[HtmlTokenType.TAG_CLOSE, null, 'test'], [HtmlTokenType.EOF]]);
|
|
});
|
|
|
|
it('should store the locations', () => {
|
|
expect(tokenizeAndHumanizeSourceSpans('</test>'))
|
|
.toEqual([[HtmlTokenType.TAG_CLOSE, '</test>'], [HtmlTokenType.EOF, '']]);
|
|
});
|
|
|
|
it('should report missing name after </', () => {
|
|
expect(tokenizeAndHumanizeErrors('</'))
|
|
.toEqual([[HtmlTokenType.TAG_CLOSE, 'Unexpected character "EOF"', '0:2']]);
|
|
});
|
|
|
|
it('should report missing >', () => {
|
|
expect(tokenizeAndHumanizeErrors('</test'))
|
|
.toEqual([[HtmlTokenType.TAG_CLOSE, 'Unexpected character "EOF"', '0:6']]);
|
|
});
|
|
});
|
|
|
|
describe('entities', () => {
|
|
it('should parse named entities', () => {
|
|
expect(tokenizeAndHumanizeParts('a&b'))
|
|
.toEqual([[HtmlTokenType.TEXT, 'a&b'], [HtmlTokenType.EOF]]);
|
|
});
|
|
|
|
it('should parse hexadecimal entities', () => {
|
|
expect(tokenizeAndHumanizeParts('A'))
|
|
.toEqual([[HtmlTokenType.TEXT, 'A'], [HtmlTokenType.EOF]]);
|
|
});
|
|
|
|
it('should parse decimal entities', () => {
|
|
expect(tokenizeAndHumanizeParts('A'))
|
|
.toEqual([[HtmlTokenType.TEXT, 'A'], [HtmlTokenType.EOF]]);
|
|
});
|
|
|
|
it('should store the locations', () => {
|
|
expect(tokenizeAndHumanizeSourceSpans('a&b'))
|
|
.toEqual([[HtmlTokenType.TEXT, 'a&b'], [HtmlTokenType.EOF, '']]);
|
|
});
|
|
|
|
it('should report unknown named entities >', () => {
|
|
expect(tokenizeAndHumanizeErrors('&tbo;'))
|
|
.toEqual([[HtmlTokenType.TEXT, 'Unknown entity "tbo"', '0:0']]);
|
|
expect(tokenizeAndHumanizeErrors('&#asdf;'))
|
|
.toEqual([[HtmlTokenType.TEXT, 'Unknown entity "#asdf"', '0:0']]);
|
|
expect(tokenizeAndHumanizeErrors('
sdf;'))
|
|
.toEqual([[HtmlTokenType.TEXT, 'Unknown entity "#xasdf"', '0:0']]);
|
|
});
|
|
});
|
|
|
|
describe('regular text', () => {
|
|
it('should parse text', () => {
|
|
expect(tokenizeAndHumanizeParts('a'))
|
|
.toEqual([[HtmlTokenType.TEXT, 'a'], [HtmlTokenType.EOF]]);
|
|
});
|
|
|
|
it('should parse entities', () => {
|
|
expect(tokenizeAndHumanizeParts('a&b'))
|
|
.toEqual([[HtmlTokenType.TEXT, 'a&b'], [HtmlTokenType.EOF]]);
|
|
});
|
|
|
|
it('should store the locations', () => {
|
|
expect(tokenizeAndHumanizeSourceSpans('a'))
|
|
.toEqual([[HtmlTokenType.TEXT, 'a'], [HtmlTokenType.EOF, '']]);
|
|
});
|
|
});
|
|
|
|
describe('raw text', () => {
|
|
it('should parse text', () => {
|
|
expect(tokenizeAndHumanizeParts(`<script>a</script>`))
|
|
.toEqual([
|
|
[HtmlTokenType.TAG_OPEN_START, null, 'script'],
|
|
[HtmlTokenType.TAG_OPEN_END],
|
|
[HtmlTokenType.RAW_TEXT, 'a'],
|
|
[HtmlTokenType.TAG_CLOSE, null, 'script'],
|
|
[HtmlTokenType.EOF]
|
|
]);
|
|
});
|
|
|
|
it('should not detect entities', () => {
|
|
expect(tokenizeAndHumanizeParts(`<script>&</script>`))
|
|
.toEqual([
|
|
[HtmlTokenType.TAG_OPEN_START, null, 'script'],
|
|
[HtmlTokenType.TAG_OPEN_END],
|
|
[HtmlTokenType.RAW_TEXT, '&'],
|
|
[HtmlTokenType.TAG_CLOSE, null, 'script'],
|
|
[HtmlTokenType.EOF]
|
|
]);
|
|
});
|
|
|
|
it('should ignore other opening tags', () => {
|
|
expect(tokenizeAndHumanizeParts(`<script>a<div></script>`))
|
|
.toEqual([
|
|
[HtmlTokenType.TAG_OPEN_START, null, 'script'],
|
|
[HtmlTokenType.TAG_OPEN_END],
|
|
[HtmlTokenType.RAW_TEXT, 'a<div>'],
|
|
[HtmlTokenType.TAG_CLOSE, null, 'script'],
|
|
[HtmlTokenType.EOF]
|
|
]);
|
|
});
|
|
|
|
it('should ignore other closing tags', () => {
|
|
expect(tokenizeAndHumanizeParts(`<script>a</test></script>`))
|
|
.toEqual([
|
|
[HtmlTokenType.TAG_OPEN_START, null, 'script'],
|
|
[HtmlTokenType.TAG_OPEN_END],
|
|
[HtmlTokenType.RAW_TEXT, 'a</test>'],
|
|
[HtmlTokenType.TAG_CLOSE, null, 'script'],
|
|
[HtmlTokenType.EOF]
|
|
]);
|
|
});
|
|
|
|
it('should store the locations', () => {
|
|
expect(tokenizeAndHumanizeSourceSpans(`<script>a</script>`))
|
|
.toEqual([
|
|
[HtmlTokenType.TAG_OPEN_START, '<script'],
|
|
[HtmlTokenType.TAG_OPEN_END, '>'],
|
|
[HtmlTokenType.RAW_TEXT, 'a'],
|
|
[HtmlTokenType.TAG_CLOSE, '</script>'],
|
|
[HtmlTokenType.EOF, '']
|
|
]);
|
|
});
|
|
|
|
});
|
|
|
|
describe('escapable raw text', () => {
|
|
it('should parse text', () => {
|
|
expect(tokenizeAndHumanizeParts(`<title>a</title>`))
|
|
.toEqual([
|
|
[HtmlTokenType.TAG_OPEN_START, null, 'title'],
|
|
[HtmlTokenType.TAG_OPEN_END],
|
|
[HtmlTokenType.ESCAPABLE_RAW_TEXT, 'a'],
|
|
[HtmlTokenType.TAG_CLOSE, null, 'title'],
|
|
[HtmlTokenType.EOF]
|
|
]);
|
|
});
|
|
|
|
it('should detect entities', () => {
|
|
expect(tokenizeAndHumanizeParts(`<title>&</title>`))
|
|
.toEqual([
|
|
[HtmlTokenType.TAG_OPEN_START, null, 'title'],
|
|
[HtmlTokenType.TAG_OPEN_END],
|
|
[HtmlTokenType.ESCAPABLE_RAW_TEXT, '&'],
|
|
[HtmlTokenType.TAG_CLOSE, null, 'title'],
|
|
[HtmlTokenType.EOF]
|
|
]);
|
|
});
|
|
|
|
it('should ignore other opening tags', () => {
|
|
expect(tokenizeAndHumanizeParts(`<title>a<div></title>`))
|
|
.toEqual([
|
|
[HtmlTokenType.TAG_OPEN_START, null, 'title'],
|
|
[HtmlTokenType.TAG_OPEN_END],
|
|
[HtmlTokenType.ESCAPABLE_RAW_TEXT, 'a<div>'],
|
|
[HtmlTokenType.TAG_CLOSE, null, 'title'],
|
|
[HtmlTokenType.EOF]
|
|
]);
|
|
});
|
|
|
|
it('should ignore other closing tags', () => {
|
|
expect(tokenizeAndHumanizeParts(`<title>a</test></title>`))
|
|
.toEqual([
|
|
[HtmlTokenType.TAG_OPEN_START, null, 'title'],
|
|
[HtmlTokenType.TAG_OPEN_END],
|
|
[HtmlTokenType.ESCAPABLE_RAW_TEXT, 'a</test>'],
|
|
[HtmlTokenType.TAG_CLOSE, null, 'title'],
|
|
[HtmlTokenType.EOF]
|
|
]);
|
|
});
|
|
|
|
it('should store the locations', () => {
|
|
expect(tokenizeAndHumanizeSourceSpans(`<title>a</title>`))
|
|
.toEqual([
|
|
[HtmlTokenType.TAG_OPEN_START, '<title'],
|
|
[HtmlTokenType.TAG_OPEN_END, '>'],
|
|
[HtmlTokenType.ESCAPABLE_RAW_TEXT, 'a'],
|
|
[HtmlTokenType.TAG_CLOSE, '</title>'],
|
|
[HtmlTokenType.EOF, '']
|
|
]);
|
|
});
|
|
|
|
});
|
|
|
|
});
|
|
}
|
|
|
|
function tokenizeWithoutErrors(input: string): HtmlToken[] {
|
|
var tokenizeResult = tokenizeHtml(input, 'someUrl');
|
|
if (tokenizeResult.errors.length > 0) {
|
|
var errorString = tokenizeResult.errors.join('\n');
|
|
throw new BaseException(`Unexpected parse errors:\n${errorString}`);
|
|
}
|
|
return tokenizeResult.tokens;
|
|
}
|
|
|
|
function tokenizeAndHumanizeParts(input: string): any[] {
|
|
return tokenizeWithoutErrors(input).map(token => [<any>token.type].concat(token.parts));
|
|
}
|
|
|
|
function tokenizeAndHumanizeSourceSpans(input: string): any[] {
|
|
return tokenizeWithoutErrors(input).map(token => [<any>token.type, token.sourceSpan.toString()]);
|
|
}
|
|
|
|
function humanizeLineColumn(location: ParseLocation): string {
|
|
return `${location.line}:${location.col}`;
|
|
}
|
|
|
|
function tokenizeAndHumanizeLineColumn(input: string): any[] {
|
|
return tokenizeWithoutErrors(input)
|
|
.map(token => [<any>token.type, humanizeLineColumn(token.sourceSpan.start)]);
|
|
}
|
|
|
|
function tokenizeAndHumanizeErrors(input: string): any[] {
|
|
return tokenizeHtml(input, 'someUrl')
|
|
.errors.map(
|
|
tokenError =>
|
|
[<any>tokenError.tokenType, tokenError.msg, humanizeLineColumn(tokenError.location)]);
|
|
}
|