feat(compiler): support tokenizing a sub-section of an input string (#28055)

The lexer that does the tokenizing can now process just a part of the
source string, by passing a `range` property in the `options` argument.
The locations of the tokenized nodes now take into account the position
of that range within the original source string.
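
For illustration, here is a minimal sketch of building such a range for a
sub-section of a larger file. The field names (`startPos`, `startLine`,
`startCol`, `endPos`) and the zero-based line/column convention are
assumptions, since the `LexerRange` definition itself is not part of this
diff:

```ts
// Assumed shape of the range accepted by the lexer; not taken from this diff.
interface LexerRange {
  startPos: number;   // absolute offset of the first character to tokenize
  startLine: number;  // zero-based line of that character in the full source
  startCol: number;   // zero-based column of that character on startLine
  endPos: number;     // absolute offset just past the last character to tokenize
}

// Hypothetical helper: derive a LexerRange for the [start, end) slice of `source`.
function rangeFor(source: string, start: number, end: number): LexerRange {
  const precedingLines = source.substring(0, start).split('\n');
  return {
    startPos: start,
    startLine: precedingLines.length - 1,
    startCol: precedingLines[precedingLines.length - 1].length,
    endPos: end,
  };
}
```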

This `range` option is, in turn, exposed from the template parser via its
`ParseTemplateOptions`.
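
As a sketch of the parser side, assuming the `parseTemplate` entry point
that accepts these `ParseTemplateOptions` (the actual call site is not part
of this diff, and the file contents below are illustrative only):

```ts
import {parseTemplate} from '@angular/compiler';

// A TypeScript file that contains an inline component template.
const file = [
  `@Component({`,
  `  selector: 'greeting',`,
  `  template: '<h1>Hello, {{name}}</h1>',`,
  `})`,
  `export class GreetingComponent { name = 'world'; }`,
].join('\n');

// Offsets of the template text inside the file; a real compiler would take
// these from the TypeScript AST rather than from indexOf.
const start = file.indexOf('<h1>');
const end = file.indexOf('</h1>') + '</h1>'.length;

// Translate the start offset into the (assumed zero-based) line/column pair.
const preceding = file.substring(0, start).split('\n');

const parsed = parseTemplate(file, '/app/greeting.ts', {
  range: {
    startPos: start,
    startLine: preceding.length - 1,
    startCol: preceding[preceding.length - 1].length,
    endPos: end,
  },
});

// Spans on the parsed nodes now point into `file` itself rather than into a
// detached template string, which is what inline-template source maps need.
```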

Being able to process only part of a file makes it possible to support
source maps when compiling inline component templates.

PR Close #28055
Author: Pete Bacon Darwin
Date: 2019-02-08 22:10:19 +00:00
Committed by: Misko Hevery
Parent: 1b0580a9ec
Commit: eeb560ac88
3 changed files with 77 additions and 11 deletions


@@ -18,6 +18,7 @@ import * as html from '../../ml_parser/ast';
import {HtmlParser} from '../../ml_parser/html_parser';
import {WhitespaceVisitor} from '../../ml_parser/html_whitespaces';
import {DEFAULT_INTERPOLATION_CONFIG, InterpolationConfig} from '../../ml_parser/interpolation_config';
import {LexerRange} from '../../ml_parser/lexer';
import {isNgContainer as checkIsNgContainer, splitNsName} from '../../ml_parser/tags';
import {mapLiteral} from '../../output/map_util';
import * as o from '../../output/output_ast';
@@ -1574,6 +1575,11 @@ export interface ParseTemplateOptions {
* How to parse interpolation markers.
*/
interpolationConfig?: InterpolationConfig;
/**
* The start and end point of the text to parse within the `source` string.
* The entire `source` string is parsed if this is not provided.
* */
range?: LexerRange;
}
/**