[OLINGO-63] Change LEXER grammer to work with modes

Improve operator precedence and tests
This commit is contained in:
Sven Kobler 2013-11-22 17:17:15 +01:00 committed by Stephan Klevenz
parent 243ca40936
commit 5e10653b1f
11 changed files with 2072 additions and 538 deletions

View File

@ -0,0 +1,498 @@
/*******************************************************************************
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
******************************************************************************/
lexer grammar UriLexer;
//;==============================================================================
// Mode "DEFAULT_MODE": Processes everything before the first '?' char
// On '?' the next mode "MODE_QUERY" is used
// The percent encoding rules as defined in RFC3986 ABNF rule "path-rootless" apply
//;==============================================================================
QM : '?' -> pushMode(MODE_QUERY);
STRING : '\'' -> more, pushMode(MODE_ODATA_STRING);
fragment A : 'A'|'a';
fragment B : 'B'|'b';
fragment D : 'D'|'d';
fragment E : 'E'|'e';
fragment F : 'F'|'f';
fragment G : 'G'|'g';
fragment I : 'I'|'i';
fragment L : 'L'|'l';
fragment M : 'M'|'m';
fragment N : 'N'|'n';
fragment O : 'O'|'o';
fragment R : 'R'|'r';
fragment S : 'S'|'s';
fragment T : 'T'|'t';
fragment U : 'U'|'u';
fragment Y : 'Y'|'y';
fragment Z : 'Z'|'z';
fragment ALPHA : 'a'..'z' | 'A'..'Z';
fragment ALPHA_A_TO_F : 'a'..'f' | 'A'..'F';
fragment DIGIT : '0'..'9';
fragment DIGITS : DIGIT+;
fragment HEXDIG : DIGIT | ALPHA_A_TO_F;
fragment ODI_LEADINGCHARACTER : ALPHA | '_'; //TODO; add Unicode characters from the categories L or Nl
fragment ODI_CHARACTER : ALPHA | '_' | DIGIT; //TODO; add Unicode characters from the categories L, Nl, Nd, Mn, Mc, Pc, or Cf
BATCH : '$batch';
ENTITY : '$entity';
METADATA : '$metadata';
ALL : '$all';
CROSSJOIN : '$crossjoin';
VALUE : '$value';
REF : '$ref';
COUNT : '$count';
NULLVALUE : 'null';
OPEN : '(' | '%28';
CLOSE : ')' | '%29';
// FIX: the percent-encoded alternative was corrupted to '%2 SLASH_sqpC';
// the percent-encoding of ',' is %2C (consistent with COMMA_sq / COMMA_g / COMMA_s)
COMMA : ',' | '%2C';
SLASH : '/';
POINT : '.';
AT : '@';
EQ : '=' ;
BOOLEAN : T R U E | F A L S E;
SIGN : '+' | '%2B' |'-';
INT : SIGN? DIGITS;
DECIMAL : INT '.' DIGITS ('e' SIGN? DIGITS)?;
//primary types
BINARY : ('X'| B I N A R Y) SQUOTE (HEXDIG HEXDIG)* SQUOTE; //TODO remove 'x' here and in unit tests
fragment ONE_TO_NINE : '1'..'9';
fragment ZERO_TO_FIFTYNINE : ('0'..'5') DIGIT;
fragment FRACTIONALSECONDS : DIGIT+;
fragment SECOND : ZERO_TO_FIFTYNINE;
fragment MINUTE : ZERO_TO_FIFTYNINE;
fragment HOUR : ('0' | '1') DIGIT | '2' ( '0'..'3');
fragment DAY : '0' '1'..'9' | ('1'|'2') DIGIT | '3' ('0'|'1');
fragment MONTH : '0' ONE_TO_NINE | '1' ( '0' | '1' | '2' );
fragment YEAR : ('-')? ( '0' DIGIT DIGIT DIGIT | ONE_TO_NINE DIGIT DIGIT DIGIT );
DATE : D A T E SQUOTE YEAR '-' MONTH '-' DAY SQUOTE;
DATETIMEOFFSET : D A T E T I M E O F F S E T SQUOTE YEAR '-' MONTH '-' DAY T HOUR ':' MINUTE ( ':' SECOND ( '.' FRACTIONALSECONDS )? )? ( Z | SIGN HOUR ':' MINUTE ) SQUOTE;
fragment DUSECONDFRAG : DIGITS ('.' DIGITS)? 'S';
fragment DUTIMEFRAG : 'T' (
( DIGITS 'H' (DIGITS 'M')? DUSECONDFRAG?)
| (DIGITS 'M' DUSECONDFRAG?)
| DUSECONDFRAG
);
fragment DUDAYTIMEFRAG : DIGITS 'D' DUTIMEFRAG? | DUTIMEFRAG;
DURATION : D U R A T I O N SQUOTE '-'? 'P' DUDAYTIMEFRAG SQUOTE;
TIMEOFDAY : T I M E O F D A Y SQUOTE HOUR ':' MINUTE ( ':' SECOND ( '.' FRACTIONALSECONDS )? )? SQUOTE;
fragment GUIDVALUE : HEXDIG HEXDIG HEXDIG HEXDIG HEXDIG HEXDIG HEXDIG HEXDIG'-'
HEXDIG HEXDIG HEXDIG HEXDIG '-'
HEXDIG HEXDIG HEXDIG HEXDIG '-'
HEXDIG HEXDIG HEXDIG HEXDIG '-'
HEXDIG HEXDIG HEXDIG HEXDIG HEXDIG HEXDIG HEXDIG HEXDIG HEXDIG HEXDIG HEXDIG HEXDIG;
GUID : G U I D SQUOTE GUIDVALUE SQUOTE;
ODATAIDENTIFIER : ODI_LEADINGCHARACTER (ODI_CHARACTER)*;
//;==============================================================================
// Mode "QUERY": Processes everything between the first '?' and the '#' char
// On '#' the next mode "FRAGMENT" is used
// The percent encoding rules as defined in RFC3986 ABNF rule "query" apply
mode MODE_QUERY;
//;==============================================================================
FRAGMENT : '#' -> pushMode(MODE_FRAGMENT);
FILTER : '$filter' -> pushMode(MODE_SYSTEM_QUERY);
ORDERBY : '$orderby' -> pushMode(MODE_SYSTEM_QUERY);
EXPAND : '$expand' -> pushMode(MODE_SYSTEM_QUERY);
SELECT : '$select' -> pushMode(MODE_SYSTEM_QUERY);
SKIP : '$skip' -> pushMode(MODE_SYSTEM_QUERY);
TOP : '$top' -> pushMode(MODE_SYSTEM_QUERY);
LEVELS : '$levels' -> pushMode(MODE_SYSTEM_QUERY);
FORMAT : '$format' -> pushMode(MODE_SYSTEM_QUERY_PCHAR);
COUNT_q : '$count' -> type(COUNT), pushMode(MODE_SYSTEM_QUERY);
REF_q : '$ref' -> type(REF);
VALUE_q : '$value' -> type(VALUE);
ID : '$id'-> pushMode(MODE_SYSTEM_QUERY_REST_QCHAR_NO_AMP);
SKIPTOKEN : '$skiptoken' -> pushMode(MODE_SYSTEM_QUERY_REST_QCHAR_NO_AMP);
SEARCH : '$search'-> pushMode(MODE_SYSTEM_QUERY_SEARCH);
GEOGRAPHY : G_q E_q O_q G_q R_q A_q P_q H_q Y_q -> pushMode(MODE_ODATA_GEO);//TODO make case insensitive
GEOMETRY : G_q E_q O_q M_q E_q T_q R_q Y_q -> pushMode(MODE_ODATA_GEO);
fragment A_q : 'A'|'a';
fragment E_q : 'E'|'e';
fragment G_q : 'G'|'g';
fragment H_q : 'H'|'h';
fragment M_q : 'M'|'m';
fragment O_q : 'O'|'o';
fragment P_q : 'P'|'p';
fragment R_q : 'R'|'r';
fragment S_q : 'S'|'s';
fragment T_q : 'T'|'t';
fragment Y_q : 'Y'|'y';
fragment ALPHA_q : 'a'..'z'|'A'..'Z';
fragment A_TO_F_q : 'a'..'f'|'A'..'F';
fragment DIGIT_q : '0'..'9';
fragment HEXDIG_q : DIGIT_q | A_TO_F_q;
fragment PCT_ENCODED_q : '%' HEXDIG_q HEXDIG_q;
fragment UNRESERVED_q : ALPHA_q | DIGIT_q | '-' |'.' | '_' | '~';
fragment OTHER_DELIMS_q : '!' | '(' | ')' | '*' | '+' | ',' | ';';
fragment QCHAR_NO_AMP_q : UNRESERVED_q | PCT_ENCODED_q | OTHER_DELIMS_q | ':' | '@' | '/' | '?' | '$' | '\'' | '=';
fragment QCHAR_NO_AMP_EQ_q : UNRESERVED_q | PCT_ENCODED_q | OTHER_DELIMS_q | ':' | '@' | '/' | '?' | '$' | '\'';
fragment QCHAR_NO_AMP_EQ_AT_DOLLAR_q : UNRESERVED_q | PCT_ENCODED_q | OTHER_DELIMS_q | ':' | '/' | '?' | '\'';
EQ_q : '=' -> type(EQ);
AMP : '&';
CUSTOMNAME : QCHAR_NO_AMP_EQ_AT_DOLLAR_q QCHAR_NO_AMP_EQ_q*;
CUSTOMVALUE : QCHAR_NO_AMP_EQ_q+;
//;==============================================================================
mode MODE_SYSTEM_QUERY_PCHAR;
//;==============================================================================
// '&' terminates the $format value. FIX: retype to AMP so the parser's
// queryOptions rule (which matches token AMP) can see it, and pop once back to
// MODE_QUERY — consistent with AMP_sqr and AMP_sq in the sibling modes
// (the previous double popMode fell back to DEFAULT_MODE, where the following
// query-option tokens such as '$filter' are not defined).
AMP_sqp : '&' -> type(AMP), popMode;
//fragment EQ_sqp : '=';
fragment ALPHA_sqp : 'a'..'z'|'A'..'Z';
fragment A_TO_F_sqp : 'a'..'f'|'A'..'F';
fragment DIGIT_sqp : '0'..'9';
fragment HEXDIG_sqp : DIGIT_sqp | A_TO_F_sqp;
fragment PCT_ENCODED_sqp : '%' HEXDIG_sqp HEXDIG_sqp;
fragment SUB_DELIMS_sqp : '$' | '&' | '\'' | EQ_sqp | OTHER_DELIMS_sqp;
fragment OTHER_DELIMS_sqp : '!' | '(' | ')' | '*' | '+' | ',' | ';';
fragment UNRESERVED_sqp : ALPHA_sqp | DIGIT_sqp | '-' |'.' | '_' | '~';
fragment PCHAR : UNRESERVED_sqp | PCT_ENCODED_sqp | SUB_DELIMS_sqp | ':' | '@';
fragment PCHARSTART : UNRESERVED_sqp | PCT_ENCODED_sqp | '$' | '&' | '\'' | OTHER_DELIMS_sqp | ':' | '@';
ATOM : [Aa][Tt][Oo][Mm];
JSON : [Jj][Ss][Oo][Nn];
XML : [Xx][Mm][Ll];
PCHARS : PCHARSTART PCHAR*;
SLASH_sqp : '/' -> type(SLASH);
EQ_sqp : '=' -> type(EQ);
//;==============================================================================
mode MODE_SYSTEM_QUERY_REST_QCHAR_NO_AMP;
//;==============================================================================
fragment ALPHA_sqr : 'a'..'z'|'A'..'Z';
fragment A_TO_F_sqr : 'a'..'f'|'A'..'F';
fragment DIGIT_sqr : '0'..'9';
fragment HEXDIG_sqr : DIGIT_sqr | A_TO_F_sqr;
fragment PCT_ENCODED_sqr : '%' HEXDIG_sqr HEXDIG_sqr;
fragment UNRESERVED_sqr : ALPHA_sqr | DIGIT_sqr | '-' |'.' | '_' | '~';
fragment OTHER_DELIMS_sqr : '!' | '(' | ')' | '*' | '+' | ',' | ';';
fragment QCHAR_NO_AMP_sqr : UNRESERVED_sqr | PCT_ENCODED_sqr | OTHER_DELIMS_sqr | ':' | '@' | '/' | '?' | '$' | '\'' | '=';
fragment QCHAR_NO_AMP_EQ_sqr : UNRESERVED_sqr | PCT_ENCODED_sqr | OTHER_DELIMS_sqr | ':' | '@' | '/' | '?' | '$' | '\'' ;
//REST : ~[&#]*;
AMP_sqr : '&' -> type(AMP), popMode;
EQ_sqr : '=' -> type(EQ);
FRAGMENT_sqr : '#' -> popMode;
REST : QCHAR_NO_AMP_EQ_sqr QCHAR_NO_AMP_sqr*;
//;==============================================================================
mode MODE_SYSTEM_QUERY_SEARCH;
//;==============================================================================
fragment ALPHA_sqc : 'a'..'z'|'A'..'Z';
fragment WS_sqc : ( SP_g | HTAB_g | '%20' | '%09' );
fragment DQUOTE : '\u0022';
NOT_sqc : 'NOT' -> type(NOT);
AND_sqc : 'AND' -> type(AND);
OR_sqc : 'OR' -> type(OR);
EQ_sqc : '=' -> type(EQ);
WSP_sqc : WS_sqc+ -> type(WSP);
QUOTATION_MARK : DQUOTE | '%22';
REF_sqc : '$ref' -> type(REF);
SEARCHWORD : ALPHA_sqc+;
SEARCHPHRASE : QUOTATION_MARK /*QCHAR_NO_AMP_DQUOTE+*/ ~[&"]* QUOTATION_MARK -> popMode;
//;==============================================================================
mode MODE_SYSTEM_QUERY;
//;==============================================================================
// FIX: fragment rules cannot carry lexer commands (-> type(...)) in ANTLR 4;
// the command is dropped — fragments never emit tokens, so it had no effect anyway.
fragment SQUOTE_sq : '\'';
STRING_sq : SQUOTE_sq -> more, pushMode(MODE_ODATA_STRING);
GEOGRAPHY_sq : G_sq E_sq O_sq G_sq R_sq A_sq P_sq H_sq Y SQUOTE_sq -> type(GEOGRAPHY), pushMode(MODE_ODATA_GEO); //TODO make case insensitive
GEOMETRY_sq : G_sq E_sq O_sq M_sq E_sq T_sq R_sq Y_sq SQUOTE_sq -> type(GEOMETRY),pushMode(MODE_ODATA_GEO);
fragment A_sq : 'A'|'a';
fragment B_sq : 'B'|'b';
fragment D_sq : 'D'|'d';
fragment E_sq : 'E'|'e';
fragment F_sq : 'F'|'f';
fragment G_sq : 'G'|'g';
fragment H_sq : 'H'|'h';
fragment I_sq : 'I'|'i';
fragment L_sq : 'L'|'l';
fragment M_sq : 'M'|'m';
fragment N_sq : 'N'|'n';
fragment O_sq : 'O'|'o';
fragment P_sq : 'P'|'p';
fragment R_sq : 'R'|'r';
fragment S_sq : 'S'|'s';
fragment T_sq : 'T'|'t';
fragment U_sq : 'U'|'u';
fragment Y_sq : 'Y'|'y';
fragment Z_sq : 'Z'|'z';
fragment ALPHA_sq : 'a'..'z'|'A'..'Z';
fragment ALPHA_A_TO_F_sq : 'a'..'f'|'A'..'F';
fragment DIGIT_sq : '0'..'9';
fragment DIGITS_sq : DIGIT_sq+;
fragment HEXDIG_sq : DIGIT_sq | ALPHA_A_TO_F_sq;
fragment ODI_LEADINGCHARACTER_sq : ALPHA_sq | '_'; //TODO; plus Unicode characters from the categories L or Nl
fragment ODI_CHARACTER_sq : ALPHA_sq | '_' | DIGIT_sq; //TODO; plus Unicode characters from the categories L, Nl, Nd, Mn, Mc, Pc, or Cf
fragment WS_sqr : ( SP_g | HTAB_g | '%20' | '%09' );
OPEN_sq : ('(' | '%28') -> type(OPEN);
CLOSE_sq : (')' | '%29') -> type(CLOSE);
COMMA_sq : (',' | '%2C') -> type(COMMA);
SLASH_sq : '/' -> type(SLASH);
POINT_sq : '.' -> type(POINT);
AT_sq : '@' -> type(AT);
STAR : '*';
SEMI_sq : ';' -> type(SEMI);
EQ_sq : '=' -> type(EQ);
AMP_sq : '&' -> type(AMP), popMode;
WSP_sqr : WS_sqr+ -> type(WSP);
NULLVALUE_sq : 'null' -> type(NULLVALUE);
TRUE : 'true';
FALSE : 'false';
BOOLEAN_sq : (T_sq R_sq U_sq E_sq | F_sq A_sq L_sq S_sq E_sq) -> type(BOOLEAN);
SIGN_sq : ('+' | '%2B' |'-') -> type(SIGN);
INT_sq : SIGN_sq? DIGITS_sq -> type(INT);
DECIMAL_sq : INT_sq '.' DIGITS_sq ('e' SIGN_sq? DIGITS_sq)? -> type(DECIMAL);
BINARY_sq : ('X'| B_sq I_sq N_sq A_sq R_sq Y_sq) SQUOTE_sq (HEXDIG_sq HEXDIG_sq)* SQUOTE_sq -> type(BINARY);
ASC : 'asc';
DESC : 'desc';
MUL : 'mul';
DIV : 'div';
MOD : 'mod';
ADD : 'add';
SUB : 'sub';
GT : 'gt';
GE : 'ge';
LT : 'lt';
LE : 'le';
EQ_ALPHA : 'eq';
NE : 'ne';
AND : 'and';
OR : 'or';
ISOF : 'isof';
NOT : 'not';
MINUS :'-';
ROOT : '$root/';
NANINFINITY : 'NaN' | '-INF' | 'INF';
fragment ONE_TO_NINE_sq : '1'..'9';
fragment ZERO_TO_FIFTYNINE_sq : ('0'..'5') DIGIT_sq;
fragment FRACTIONALSECONDS_sq : DIGIT_sq+;
fragment SECOND_sq : ZERO_TO_FIFTYNINE_sq;
fragment MINUTE_sq : ZERO_TO_FIFTYNINE_sq;
fragment HOUR_sq : ('0' | '1') DIGIT_sq | '2' ( '0'..'3');
fragment DAY_sq : '0' '1'..'9' | ('1'|'2') DIGIT_sq | '3' ('0'|'1');
fragment MONTH_sq : '0' ONE_TO_NINE_sq | '1' ( '0' | '1' | '2' );
// year: optional '-', then four digits (leading zero only as '0ddd').
// Consistency fix: use ONE_TO_NINE_sq like the other fragments in this mode
// (fragments are global, so behavior is unchanged).
fragment YEAR_sq : ('-')? ( '0' DIGIT_sq DIGIT_sq DIGIT_sq | ONE_TO_NINE_sq DIGIT_sq DIGIT_sq DIGIT_sq );
DATE_sq : D_sq A_sq T_sq E_sq SQUOTE_sq YEAR_sq '-' MONTH_sq '-' DAY_sq SQUOTE_sq -> type(DATE);
DATETIMEOFFSET_sq : D_sq A_sq T_sq E_sq T_sq I_sq M_sq E_sq O_sq F_sq F_sq S_sq E_sq T_sq SQUOTE_sq YEAR_sq '-' MONTH_sq '-' DAY_sq T_sq HOUR_sq ':' MINUTE_sq ( ':' SECOND_sq ( '.' FRACTIONALSECONDS_sq )? )? ( Z_sq | SIGN_sq HOUR_sq ':' MINUTE_sq ) SQUOTE_sq -> type(DATETIMEOFFSET);
fragment DUSECONDFRAG_sq : DIGITS_sq ('.' DIGITS_sq)? 'S';
fragment DUTIMEFRAG_sq : 'T' (
( DIGITS_sq 'H' (DIGITS_sq 'M')? DUSECONDFRAG_sq?)
| (DIGITS_sq 'M' DUSECONDFRAG_sq?)
| DUSECONDFRAG_sq
);
// duration day/time part: days with optional time part, or time part alone.
// Consistency fix: referenced the DEFAULT_MODE fragments (DIGITS / DUTIMEFRAG);
// use the _sq variants defined for this mode (fragments are global, behavior unchanged).
fragment DUDAYTIMEFRAG_sq : DIGITS_sq 'D' DUTIMEFRAG_sq? | DUTIMEFRAG_sq;
DURATION_sq : D_sq U_sq R_sq A_sq T_sq I_sq O_sq N_sq SQUOTE_sq '-'? 'P' DUDAYTIMEFRAG_sq SQUOTE_sq -> type(DURATION);
TIMEOFDAY_sq : T_sq I_sq M_sq E_sq O_sq F_sq D_sq A_sq Y_sq SQUOTE_sq HOUR_sq ':' MINUTE_sq ( ':' SECOND_sq ( '.' FRACTIONALSECONDS_sq )? )? SQUOTE_sq -> type(TIMEOFDAY);
GUID_sq : G_sq U_sq I_sq D_sq SQUOTE_sq GUIDVALUE_sq SQUOTE_sq -> type(GUID);
fragment GUIDVALUE_sq : HEXDIG_sq HEXDIG_sq HEXDIG_sq HEXDIG_sq HEXDIG_sq HEXDIG_sq HEXDIG_sq HEXDIG_sq'-'
HEXDIG_sq HEXDIG_sq HEXDIG_sq HEXDIG_sq '-'
HEXDIG_sq HEXDIG_sq HEXDIG_sq HEXDIG_sq '-'
HEXDIG_sq HEXDIG_sq HEXDIG_sq HEXDIG_sq '-'
HEXDIG_sq HEXDIG_sq HEXDIG_sq HEXDIG_sq HEXDIG_sq HEXDIG_sq HEXDIG_sq HEXDIG_sq HEXDIG_sq HEXDIG_sq HEXDIG_sq HEXDIG_sq;
fragment PCT_ENCODED_sq : '%' HEXDIG_sq HEXDIG_sq;
fragment UNRESERVED_sq : ALPHA_sq | DIGIT_sq | '-' |'.' | '_' | '~';
fragment OTHER_DELIMS_sq : '!' | '(' | ')' | '*' | '+' | ',' | ';';
fragment QCHAR_NO_AMP_sq : UNRESERVED_sq | PCT_ENCODED_sq | OTHER_DELIMS_sq | ':' | '@' | '/' | '?' | '$' | '\'' | '=';
IMPLICIT_VARIABLE_EXPR : '$it';
REF_sq : '$ref' -> type(REF);
LEVELS_sq : '$levels' -> type(LEVELS);
CONTAINS_WORD : 'contains(';
STARTSWITH_WORD : 'startswith(';
ENDSWITH_WORD : 'endswith(';
LENGTH_WORD : 'length(';
INDEXOF_WORD : 'indexof(';
SUBSTRING_WORD : 'substring(';
TOLOWER_WORD : 'tolower(';
TOUPPER_WORD : 'toupper(';
TRIM_WORD : 'trim(';
CONCAT_WORD : 'concat(';
YEAR_WORD : 'year(';
MONTH_WORD : 'month(';
DAY_WORD : 'day(';
HOUR_WORD : 'hour(';
MINUTE_WORD : 'minute(';
SECOND_WORD : 'second(';
FRACTIONALSECONDS_WORD : 'fractionalseconds(';
TOTALSECONDS_WORD : 'totalseconds(';
DATE_WORD : 'date(';
TIME_WORD : 'time(';
TOTALOFFSETMINUTES_WORD : 'totaloffsetminutes(';
MINDATETIME_WORD : 'mindatetime(';
MAXDATETIME_WORD : 'maxdatetime(';
NOW_WORD : 'now(';
ROUND_WORD : 'round(';
FLOOR_WORD : 'floor(';
CEILING_WORD : 'ceiling(';
GEO_DISTANCE_WORD : 'geo.distance(';
GEO_LENGTH_WORD : 'geo.length(';
GEO_INTERSECTS_WORD : 'geo.intersects(';
ISOF_WORD : 'isof(';
CAST_WORD : 'cast(';
LEVELSMAX : '$levels=max';
SKIP_sq : '$skip' -> type(SKIP);
COUNT_sq : '$count' -> type(COUNT);
FILTER_sq : '$filter' -> type(FILTER);
SEARCH_sq : '$search' -> type(SEARCH), pushMode(MODE_SYSTEM_QUERY_SEARCH);
//IRI_IN_QUERY : /*EQ*/ QCHAR_NO_AMP_sq*;
ODATAIDENTIFIER_sq : ODI_LEADINGCHARACTER_sq (ODI_CHARACTER_sq)* ->type(ODATAIDENTIFIER);
//;==============================================================================
// Mode "FRAGMENT": Processes everything after the '#' char
// The percent encoding rules as defined in RFC3986 ABNF rule "fragment" apply
//;==============================================================================
mode MODE_FRAGMENT;
TMP_FRAGMENT : 'TMP_FRAGMENT';
//;==============================================================================
//;==============================================================================
// Mode "MODE_ODATA_STRING": entered after a leading single quote from
// DEFAULT_MODE (STRING, L38) or MODE_SYSTEM_QUERY (STRING_sq) — both push with
// 'more', so the opening quote stays part of the emitted token. This mode
// consumes the string content up to the closing quote and emits one STRING token.
mode MODE_ODATA_STRING;//2
fragment COMMA_s : ',' | '%2C'; // comma, plain or percent-encoded
fragment ALPHA_s : 'a'..'z'|'A'..'Z';
fragment ALPHA_A_TO_F_s : 'a'..'f'|'A'..'F';
fragment DIGIT_s : '0'..'9';
fragment HEXDIG_s : DIGIT_s | ALPHA_A_TO_F_s;
fragment UNRESERVED_s : ALPHA_s | DIGIT_s | '-' |'.' | '_' | '~';
fragment OTHER_DELIMS_s : '!' | '(' | ')' | '*' | '+' | COMMA_s | ';';
// any percent-encoded octet EXCEPT %27 (the single quote): either the first hex
// digit is not '2', or it is '2' followed by anything but '7'
fragment PCTENCODEDnoSQUOTE_s : '%' ( '0'|'1'|'3'..'9' | ALPHA_A_TO_F_s ) HEXDIG_s | '%' '2' ( '0'..'6'|'8'|'9' | ALPHA_A_TO_F_s );
fragment PCHARnoSQUOTE_s : UNRESERVED_s| PCTENCODEDnoSQUOTE_s | OTHER_DELIMS_s | '$' | '&' | '=' | ':' | '@';
fragment SQUOTE_s : '\'';
// '' (two quotes) is the escaped single quote inside an OData string literal;
// the closing quote ends the token and returns to the previous mode
STRING_s : ('\'\'' | PCHARnoSQUOTE_s )* SQUOTE_s -> type(STRING), popMode;
//;==============================================================================
//;==============================================================================
mode MODE_ODATA_GEO;
fragment C_g : 'c'|'C';
fragment D_g : 'd'|'D';
fragment E_g : 'e'|'E';
fragment G_g : 'g'|'G';
fragment H_g : 'h'|'H';
fragment I_g : 'i'|'I';
fragment L_g : 'l'|'L';
fragment M_g : 'm'|'M';
fragment N_g : 'n'|'N';
fragment O_g : 'o'|'O';
fragment P_g : 'p'|'P';
fragment R_g : 'r'|'R';
fragment S_g : 's'|'S';
fragment T_g : 't'|'T';
fragment U_g : 'u'|'U';
fragment Y_g : 'y'|'Y';
fragment SP_g : ' ';//'\u0020'; // a simple space
fragment HTAB_g : '%09';
fragment WS_g : ( SP_g | HTAB_g | '%20' | '%09' );
OPEN_g : ('(' | '%28') -> type(OPEN);
CLOSE_g : (')' | '%29') -> type(CLOSE);
COMMA_g : (',' | '%2C') -> type(COMMA);
WSP : WS_g+;
POINT_g : '.' -> type(POINT);
AT_g : '@' -> type(AT);
SEMI : (';' | '%3B');
EQ_g : '=' -> type(EQ);
fragment DIGIT_g : '0'..'9';
fragment DIGITS_g : DIGIT_g+;
SIGN_g : ('+' | '%2B' |'-') -> type(SIGN);
INT_g : SIGN_g? DIGITS_g -> type(INT);
DECIMAL_g : INT_g '.' DIGITS_g ('e' SIGN_g? DIGITS_g)? -> type(DECIMAL);
COLLECTION : C_g O_g L_g L_g E_g C_g T_g I_g O_g N_g ;
LINESTRING : L_g I_g N_g E_g S_g T_g R_g I_g N_g G_g ;
MULTILINESTRING : M_g U_g L_g T_g I_g L_g I_g N_g E_g S_g T_g R_g I_g N_g G_g;
MULTIPOINT : M_g U_g L_g T_g I_g P_g O_g I_g N_g T_g ;
MULTIPOLYGON : M_g U_g L_g T_g I_g P_g O_g L_g Y_g G_g O_g N_g;
GEO_POINT : P_g O_g I_g N_g T_g;
POLYGON : P_g O_g L_g Y_g G_g O_g N_g ;
SRID : S_g R_g I_g D_g;
SQUOTE : '\'' -> popMode;

View File

@ -65,9 +65,8 @@ FORMAT : '$format' EQ
( 'atom'
| 'json'
| 'xml'
| PCHAR+ '/' PCHAR+ //; <a data service specific value indicating a
); //; format specific to the specific data service> or
//; <An IANA-defined [IANA-MMT] content type>
| PCHAR+ '/' PCHAR+
);
ID : '$id' EQ QCHAR_NO_AMP+;
@ -269,7 +268,8 @@ fragment ZEROtoFIFTYNINE : ('0'..'5') DIGIT;
fragment COLLECTION_CS_FIX : 'Collection';
COLLECTION_CS_FIX : 'Collection';
fragment LINESTRING_CS_FIX : 'LineString';
fragment MULTILINESTRING_CS_FIX : 'MultiLineString';
fragment MULTIPOINT_CS_FIX : 'MultiPoint';
@ -301,19 +301,31 @@ fragment GEOMETRY_CS_FIX : 'Geometry';
//; 9. Punctuation
//;------------------------------------------------------------------------------
WS : ( SP | HTAB | '%20' | '%09' );
fragment WS : ( SP | HTAB | '%20' | '%09' );
WSP : WS+ { !inGeo() }?;
fragment AT_PURE : '@';
AT : AT_PURE | '%40';
COLON : ':' | '%3A';
COMMA : ',' | '%2C';
EQ : '=';
SIGN : '+' | '%2B' |'-';
SEMI : ';' | '%3B';
STAR : '*';
SQUOTE : '\'' | '%27';
OPEN : '(' | '%28';
CLOSE : ')' | '%29';
// we must not emit individual WS tokens inside filter expressions where left-recursion applies
// commonExpr : INT
// | commonExpr WS+ ('mul'|'div'|'mod') WS+ commonExpr
// does not work... ^^^ ^^^ but:
// commonExpr : INT
// | commonExpr WSP ('mul'|'div'|'mod') WSP commonExpr
// a single WS is only allowed/required in geometry
WSG : WS { inGeo() }?;
fragment AT_PURE : '@';
AT : AT_PURE | '%40';
COLON : ':' | '%3A';
COMMA : ',' | '%2C';
EQ : '=';
SIGN : '+' | '%2B' |'-';
SEMI : ';' | '%3B';
STAR : '*';
SQUOTE : '\'' | '%27';
OPEN : '(' | '%28';
CLOSE : ')' | '%29';
//;------------------------------------------------------------------------------
@ -332,13 +344,13 @@ fragment SUB_DELIMS : '$' | '&' | '\'' | EQ | OTHER_DELIMS;
fragment OTHER_DELIMS : '!' | '(' | ')' | '*' | '+' | COMMA | ';';
fragment PCTENCODEDnoSQUOTE : '%' ( '0'|'1'|'3'..'9' | AtoF ) HEXDIG
fragment PCTENCODEDnoSQUOTE : '%' ( '0'|'1'|'3'..'9' | AtoF ) HEXDIG
| '%' '2' ( '0'..'6'|'8'|'9' | AtoF )
;
fragment QCHAR_NO_AMP : UNRESERVED | PCT_ENCODED | OTHER_DELIMS | ':' | AT_PURE | '/' | '?' | '$' | '\'' | EQ;
fragment QCHAR_NO_AMP_EQ : UNRESERVED | PCT_ENCODED | OTHER_DELIMS | ':' | AT_PURE | '/' | '?' | '$' | '\'';
fragment QCHAR_NO_AMP_EQ_AT_DOLLAR : UNRESERVED | PCT_ENCODED | OTHER_DELIMS | ':' | '/' | '?' | '\'';
fragment QCHAR_NO_AMP_EQ_AT_DOLLAR : UNRESERVED | PCT_ENCODED | OTHER_DELIMS | ':' | '/' | '?' | '\'';
fragment QCHAR_UNESCAPED : UNRESERVED | PCT_ENCODED_UNESCAPED | OTHER_DELIMS | ':' | AT_PURE | '/' | '?' | '$' | '\'' | EQ;
fragment PCT_ENCODED_UNESCAPED : '%' ( '0' | '1' | '3' | '4' | '6' | '8' | '9' | 'A'..'F' ) HEXDIG
@ -412,11 +424,7 @@ SEARCHWORD : ALPHA+ { inSearch() }?; //; Actually: any character
//; but not the words AND, OR, and NOT which are match far above
//conflict with STRING_IN_JSON, fixed with predicate
SEARCHPHRASE: QUOTATION_MARK QCHAR_NO_AMP_DQUOTE+ QUOTATION_MARK { inSearch() }?;
//TODO fix conflict
//CUSTOMNAME : QCHAR_NO_AMP_EQ_AT_DOLLAR QCHAR_NO_AMP_EQ* { IsCUSTallowed() }?;
//CUSTOMVALUE : QCHAR_NO_AMP+ { IsCUSTallowed() }?;
SEARCHPHRASE : QUOTATION_MARK QCHAR_NO_AMP_DQUOTE+ QUOTATION_MARK { inSearch() }?;
ODATAIDENTIFIER : ODI_LEADINGCHARACTER (ODI_CHARACTER)*;

View File

@ -16,26 +16,60 @@
* specific language governing permissions and limitations
* under the License.
******************************************************************************/
grammar Uri;
grammar UriParser;
//Antlr4 (as most parsers) has a lexer for token detection and a parser which defines
//rules based on the tokens. However it's hard to define a clear lexer on the
//ODATA URI syntax for several reasons:
// - the syntax is based on the URI specification and therefore contains the definition
// of delimiters and percent encoding
// - the syntax includes JSON
// - the syntax includes an expression syntax which comes from ODATA itself (e.g. $filter)
// - the syntax includes searchstring and searchword
// - the ABNF describing syntax is not defined in a context free manner
// so there are several kinds of "String" tokens:
// - strings with single quotes,
// - strings with single quotes and a special syntax within the quotes (like geometry data)
// - strings with double quotes
// - strings without quotes ( usually identifiers, searchstrings, searchwords, custom parameters)
// but each with different allowed charactersets
// Drawing a simple line between lexer and parser is not possible.
//
// This grammar is a compromise we have chosen to satisfy the requirements we have
// - the grammar is context free
// - this makes the parser much simpler and we have a clear separation between parsing and
// EDM validation, but also creates a parse tree which is not semantically correct from the
// EDM perspective ( e.g. it will not pass the EDM validation)
// - the grammar can not be applied on a full URI string
// - the URI should be split according to the URI specification before used as input for the
// ODATA parser
// - while creating the grammar the antlr lexer modes were only allowed in pure lexer grammars,
// not in combined grammars, and it was not possible to include a lexer grammar with a mode into
// a combined grammar without creating JAVA errors.
// see https://github.com/antlr/antlr4/issues/160 "Support importing multi-mode lexer grammars"
//Naming convention
// ...
//Decoding encoding
//- within rule "resourcePath": special chars used in EDM.Strings must still be encoded when
// used as tokenizer input
// e.g. .../Employees(id='Hugo%2FMueller')/EmployeeName <-- '/' must be encoded to '%2F' in "Hugo/Mueller"
// e.g. .../Employees(id='Hugo%2FMueller')/EmployeeName <-- SLASH must be encoded to '%2F' in "Hugo/Mueller"
// but it must not be encoded before the EmployeeName
options {
language = Java;
tokenVocab=UriLexer;
}
import UriLexerPart; //contain Lexer rules
test : test_expr;
test_expr : test_expr '*' test_expr
| test_expr '+' test_expr
| INT;
//;------------------------------------------------------------------------------
//; 0. URI
@ -50,72 +84,83 @@ test_expr : test_expr '*' test_expr
odataRelativeUriEOF : odataRelativeUri? EOF;
odataRelativeUri : '$batch' # batchAlt
| '$entity' '?' eo=entityOptions # entityAlt
| '$metadata' ( '?' format )? ( FRAGMENT contextFragment )? # metadataAlt
| resourcePath ( '?' queryOptions )? # resourcePathAlt
//QM and FRAGMENT enable next lexer mode
odataRelativeUri : BATCH # batchAlt //TODO alt at beginnig
| ENTITY QM eo=entityOptions # entityAlt
| METADATA ( QM format )? ( FRAGMENT contextFragment )? # metadataAlt
| resourcePath ( QM queryOptions )? # resourcePathAlt
;
//;------------------------------------------------------------------------------
//; 1. Resource Path
//;------------------------------------------------------------------------------
resourcePath : '$all' # allAlt
resourcePath : ALL # allAlt
| crossjoin # crossjoinAlt
| pathSegments # pathSegmentsAlt
;
crossjoin : '$crossjoin' OPEN odi+=odataIdentifier ( COMMA odi+=odataIdentifier )* CLOSE;
crossjoin : CROSSJOIN OPEN odi+=odataIdentifier ( COMMA odi+=odataIdentifier )* CLOSE;
pathSegments : ps+=pathSegment ('/' ps+=pathSegment)* constSegment?;
pathSegments : ps+=pathSegment (SLASH ps+=pathSegment)* constSegment?;
pathSegment : ns=namespace? odi=odataIdentifier nvl=nameValueOptList*;
nameValueOptList : vo=valueOnly | nvl=nameValueList;
valueOnly : OPEN (primitiveLiteral /*| enumX*/) CLOSE;
valueOnly : OPEN (primitiveLiteral ) CLOSE;
nameValueList : OPEN kvp+=nameValuePair ( COMMA kvp+=nameValuePair )* CLOSE;
nameValuePair : odi=odataIdentifier EQ (AT ali=odataIdentifier | val1=primitiveLiteral /*| val2=enumX*/);
constSegment : '/' (v=VALUE | c=COUNT | r=REF );
constSegment : SLASH (v=value | c=count | r=ref );
count : COUNT;
ref : REF;
value : VALUE;
//;------------------------------------------------------------------------------
//; 2. Query Options
//;------------------------------------------------------------------------------
queryOptions : qo+=queryOption ( '&' qo+=queryOption )*;
queryOptions : qo+=queryOption ( AMP qo+=queryOption )*;
queryOption : systemQueryOption
| aliasAndValue
| customQueryOption
| AT aliasAndValue
| customQueryOption
;
entityOptions : (eob+=entityOption '&' )* id=ID ( '&' eoa+=entityOption )*;
entityOption : expand
| format
| select
| customQueryOption
entityOptions : (eob+=entityOption AMP )* ID EQ REST ( AMP eoa+=entityOption )*;
entityOption : ( expand | format | select )
| customQueryOption
;
systemQueryOption : expand
| filter
| format
| ID
| id
| inlinecount
| orderby
| search
| select
| skip
| SKIPTOKEN
| top ;
| skiptoken
| top
;
expand : '$expand' EQ expandItemList;
id : ID EQ REST;
skiptoken : SKIPTOKEN EQ REST;
expand : EXPAND EQ expandItemList;
expandItemList : expandItem ( COMMA expandItem )*;
expandItem : STAR ( '/' REF | OPEN LEVELS CLOSE )?
expandItem : STAR ( SLASH ref | OPEN (LEVELS EQ INT | LEVELSMAX) CLOSE )?
| expandPath expandPathExtension?;
expandPath : ( namespace? odataIdentifier ) ( '/' namespace? odataIdentifier )*;
expandPathExtension : '/' REF ( OPEN expandRefOption ( SEMI expandRefOption )* CLOSE )?
| '/' COUNT ( OPEN expandCountOption ( SEMI expandCountOption )* CLOSE )?
expandPath : ( namespace? odataIdentifier ) ( SLASH namespace? odataIdentifier )*;
expandPathExtension : SLASH ref ( OPEN expandRefOption ( SEMI expandRefOption )* CLOSE )?
| SLASH count ( OPEN expandCountOption ( SEMI expandCountOption )* CLOSE )?
| OPEN expandOption ( SEMI expandOption )* CLOSE
;
expandCountOption : filter
@ -132,28 +177,27 @@ expandOption : expandRefOption
| expand
| LEVELS;
filter : '$filter' EQ commonExpr;
filter : FILTER EQ commonExpr;
orderby : '$orderby' EQ orderbyItem ( COMMA orderbyItem )*;
orderbyItem : commonExpr ( WS+ ( 'asc' | 'desc' ) )?;
orderby : ORDERBY EQ orderbyItem ( COMMA orderbyItem )*;
orderbyItem : commonExpr ( WSP ( ASC | DESC ) )?;
//this is completely done in the lexer grammar to avoid ambiguities with odataIdentifier and STRING
skip : SKIP;
top : TOP;
format : FORMAT;
skip : SKIP EQ INT;
top : TOP EQ INT;
format : FORMAT EQ ( ATOM | JSON | XML | PCHARS ( SLASH PCHARS)?);
inlinecount : '$count' EQ booleanNonCase;
inlinecount : COUNT EQ booleanNonCase;
search : '$search' searchSpecialToken;
search : SEARCH searchSpecialToken;
searchSpecialToken : { ((UriLexer) this.getInputStream().getTokenSource()).setInSearch(true); }
EQ WS* searchExpr
{ ((UriLexer) this.getInputStream().getTokenSource()).setInSearch(false); }
;
searchSpecialToken : EQ WSP? searchExpr;
searchExpr : 'NOT' WS+ searchExpr
| searchExpr WS+ ('AND' WS+)? searchExpr
| searchExpr WS+ 'OR' WS+ searchExpr
searchExpr : (NOT WSP) searchExpr
| searchExpr searchExpr
| searchExpr WSP searchExpr
| searchExpr ( WSP AND WSP) searchExpr
| searchExpr ( WSP OR WSP) searchExpr
| searchPhrase
| searchWord
;
@ -161,46 +205,51 @@ searchExpr : 'NOT' WS+ searchExpr
searchPhrase : SEARCHPHRASE;
searchWord : SEARCHWORD;
select : '$select' EQ selectItem ( COMMA selectItem )*;
selectItem : namespace? '*'
| (namespace? odataIdentifier nameValueOptList? ) ( '/' namespace? odataIdentifier nameValueOptList? )*
select : SELECT EQ selectItem ( COMMA selectItem )*;
selectItem : namespace? STAR
| (namespace? odataIdentifier nameValueOptList? ) ( SLASH namespace? odataIdentifier nameValueOptList? )*
;
aliasAndValue : AT odataIdentifier EQ parameterValue;
parameterValue : arrayOrObject
aliasAndValue : odataIdentifier EQ parameterValue;
parameterValue : //arrayOrObject
commonExpr
;
customQueryOption : { ((UriLexer) this.getInputStream().getTokenSource()).setINCustomOption(true); }
customName ( EQ customValue)?
{ ((UriLexer) this.getInputStream().getTokenSource()).setINCustomOption(false); }
customQueryOption : customName ( EQ customValue)?
;
customName : 'CUSTOMNAME';
customValue : 'CUSTOMVALUE';
customName : CUSTOMNAME;
customValue : CUSTOMVALUE;
//;------------------------------------------------------------------------------
//; 3. Context URL Fragments
//;------------------------------------------------------------------------------
contextFragment : 'Collection($ref)'
| '$ref'
//ps+=pathSegment (SLASH ps+=pathSegment)*
//PRIMITIVETYPENAME
contextFragment : REF
| PRIMITIVETYPENAME
| 'Collection($ref)'
| 'Collection(Edm.EntityType)'
| 'Collection(Edm.ComplexType)'
| PRIMITIVETYPENAME
| 'collection' OPEN ( PRIMITIVETYPENAME | namespace odataIdentifier ) CLOSE
| COLLECTION_FIX OPEN ( PRIMITIVETYPENAME | namespace odataIdentifier ) CLOSE
| namespace? odataIdentifier
( '/$deletedEntity'
| '/$link'
| '/$deletedLink'
| nameValueOptList? ( '/' namespace? odataIdentifier)* ( propertyList )? ( '/$delta' )? ( entity )?
)?
| nameValueOptList? ( SLASH namespace? odataIdentifier)* ( propertyList )? ( '/$delta' )? ( entity )?
)
;
propertyList : OPEN propertyListItem ( COMMA propertyListItem )* CLOSE;
propertyListItem : STAR //; all structural properties
| propertyListProperty
;
propertyListProperty : odataIdentifier ( '+' )? ( propertyList )?
| odataIdentifier ( '/' propertyListProperty )?
propertyListProperty : namespace? odataIdentifier ( SLASH namespace? odataIdentifier)* ( '+' )? ( propertyList)?
;
entity : '/$entity';
@ -211,25 +260,30 @@ entity : '/$entity';
// this expression part of the grammar is not similar to the ABNF because
// we had to introduce operator precedence which is not reflected in the ABNF
test : test_expr EOF;
test_expr : INT
//| test_expr /*WSP*/ ( '!' | '*' ) /*WSP*/ test_expr;
//| test_expr WSP ( '!' | '*' ) WSP test_expr;
| test_expr ( WSP '!' WSP | WSP '*' WSP ) test_expr;
commonExpr : OPEN commonExpr CLOSE
| methodCallExpr
| unary WS+ commonExpr
| memberExpr
| commonExpr WS+ ('mul'|'div'|'mod') WS+ commonExpr
| commonExpr WS+ ('add'|'sub') WS+ commonExpr
| commonExpr WS+ ('gt'|'ge'|'lt'|'le'|'isof') WS+ commonExpr
| commonExpr WS+ ('eq'|'ne') WS+ commonExpr
| commonExpr WS+ ('and') WS+ commonExpr
| commonExpr WS+ ('or') WS+ commonExpr
| rootExpr //; $...
//| AT odataIdentifier //; @...
| primitiveLiteral //; ...
commonExpr : OPEN commonExpr CLOSE #altPharenthesis
| methodCallExpr #altMethod
| ( unary WSP ) commonExpr #altUnary
| memberExpr #altMember
| commonExpr (WSP MUL WSP | WSP DIV WSP | WSP MOD WSP ) commonExpr #altMult
| commonExpr (WSP ADD WSP | WSP SUB WSP) commonExpr #altAdd
| commonExpr (WSP GT WSP | WSP GE WSP | WSP LT WSP | WSP LE WSP | WSP ISOF WSP) commonExpr #altComparisn
| commonExpr (WSP EQ_ALPHA WSP | WSP NE WSP) commonExpr #altEquality
| commonExpr (WSP AND WSP) commonExpr #altAnd
| commonExpr (WSP OR WSP) commonExpr #altOr
| rootExpr #altRoot //; $...
| AT odataIdentifier #altAlias // @...
| primitiveLiteral #altLiteral // ...
;
unary : ('-'|'not') ;
unary : (MINUS| NOT) ;
rootExpr : '$root/' pathSegments;
rootExpr : ROOT pathSegments;
memberExpr : '$it' | '$it/'? pathSegments;
@ -272,43 +326,43 @@ methodCallExpr : indexOfMethodCallExpr
;
containsMethodCallExpr : CONTAINS OPEN WS* commonExpr WS* COMMA WS* commonExpr WS* CLOSE;
startsWithMethodCallExpr : STARTSWITH OPEN WS* commonExpr WS* COMMA WS* commonExpr WS* CLOSE;
endsWithMethodCallExpr : ENDSWITH OPEN WS* commonExpr WS* COMMA WS* commonExpr WS* CLOSE;
lengthMethodCallExpr : LENGTH OPEN WS* commonExpr WS* CLOSE;
indexOfMethodCallExpr : INDEXOF OPEN WS* commonExpr WS* COMMA WS* commonExpr WS* CLOSE;
substringMethodCallExpr : SUBSTRING OPEN WS* commonExpr WS* COMMA WS* commonExpr WS* ( COMMA WS* commonExpr WS* )? CLOSE;
toLowerMethodCallExpr : TOLOWER OPEN WS* commonExpr WS* CLOSE;
toUpperMethodCallExpr : TOUPPER OPEN WS* commonExpr WS* CLOSE;
trimMethodCallExpr : TRIM OPEN WS* commonExpr WS* CLOSE;
concatMethodCallExpr : CONCAT OPEN WS* commonExpr WS* COMMA WS* commonExpr WS* CLOSE;
containsMethodCallExpr : CONTAINS_WORD WS* commonExpr WS* COMMA WS* commonExpr WS* CLOSE;
startsWithMethodCallExpr : STARTSWITH_WORD WS* commonExpr WS* COMMA WS* commonExpr WS* CLOSE;
endsWithMethodCallExpr : ENDSWITH_WORD WS* commonExpr WS* COMMA WS* commonExpr WS* CLOSE;
lengthMethodCallExpr : LENGTH_WORD WS* commonExpr WS* CLOSE;
indexOfMethodCallExpr : INDEXOF_WORD WS* commonExpr WS* COMMA WS* commonExpr WS* CLOSE;
substringMethodCallExpr : SUBSTRING_WORD WS* commonExpr WS* COMMA WS* commonExpr WS* ( COMMA WS* commonExpr WS* )? CLOSE;
toLowerMethodCallExpr : TOLOWER_WORD WS* commonExpr WS* CLOSE;
toUpperMethodCallExpr : TOUPPER_WORD WS* commonExpr WS* CLOSE;
trimMethodCallExpr : TRIM_WORD WS* commonExpr WS* CLOSE;
concatMethodCallExpr : CONCAT_WORD WS* commonExpr WS* COMMA WS* commonExpr WS* CLOSE;
yearMethodCallExpr : 'year' OPEN WS* commonExpr WS* CLOSE;
monthMethodCallExpr : 'month' OPEN WS* commonExpr WS* CLOSE;
dayMethodCallExpr : 'day' OPEN WS* commonExpr WS* CLOSE;
hourMethodCallExpr : 'hour' OPEN WS* commonExpr WS* CLOSE;
minuteMethodCallExpr : 'minute' OPEN WS* commonExpr WS* CLOSE;
secondMethodCallExpr : 'second' OPEN WS* commonExpr WS* CLOSE;
fractionalsecondsMethodCallExpr : 'fractionalseconds' OPEN WS* commonExpr WS* CLOSE;
totalsecondsMethodCallExpr : 'totalseconds' OPEN WS* commonExpr WS* CLOSE;
dateMethodCallExpr : 'date' OPEN WS* commonExpr WS* CLOSE;
timeMethodCallExpr : 'time' OPEN WS* commonExpr WS* CLOSE;
totalOffsetMinutesMethodCallExpr : 'totaloffsetminutes' OPEN WS* commonExpr WS* CLOSE;
yearMethodCallExpr : YEAR_WORD WS* commonExpr WS* CLOSE;
monthMethodCallExpr : MONTH_WORD WS* commonExpr WS* CLOSE;
dayMethodCallExpr : DAY_WORD WS* commonExpr WS* CLOSE;
hourMethodCallExpr : HOUR_WORD WS* commonExpr WS* CLOSE;
minuteMethodCallExpr : MINUTE_WORD WS* commonExpr WS* CLOSE;
secondMethodCallExpr : SECOND_WORD WS* commonExpr WS* CLOSE;
fractionalsecondsMethodCallExpr : FRACTIONALSECONDS_WORD WS* commonExpr WS* CLOSE;
totalsecondsMethodCallExpr : TOTALSECONDS_WORD WS* commonExpr WS* CLOSE;
dateMethodCallExpr : DATE_WORD WS* commonExpr WS* CLOSE;
timeMethodCallExpr : TIME_WORD WS* commonExpr WS* CLOSE;
totalOffsetMinutesMethodCallExpr : TOTALOFFSETMINUTES_WORD WS* commonExpr WS* CLOSE;
minDateTimeMethodCallExpr : 'mindatetime' OPEN WS* CLOSE;
maxDateTimeMethodCallExpr : 'maxdatetime' OPEN WS* CLOSE;
nowMethodCallExpr : 'now' OPEN WS* CLOSE;
minDateTimeMethodCallExpr : MINDATETIME_WORD WS* CLOSE;
maxDateTimeMethodCallExpr : MAXDATETIME_WORD WS* CLOSE;
nowMethodCallExpr : NOW_WORD WS* CLOSE;
roundMethodCallExpr : 'round' OPEN WS* commonExpr WS* CLOSE;
floorMethodCallExpr : 'floor' OPEN WS* commonExpr WS* CLOSE;
ceilingMethodCallExpr : 'ceiling' OPEN WS* commonExpr WS* CLOSE;
roundMethodCallExpr : ROUND_WORD WS* commonExpr WS* CLOSE;
floorMethodCallExpr : FLOOR_WORD WS* commonExpr WS* CLOSE;
ceilingMethodCallExpr : CEILING_WORD WS* commonExpr WS* CLOSE;
distanceMethodCallExpr : 'geo.distance' OPEN WS* commonExpr WS* COMMA WS* commonExpr WS* CLOSE;
geoLengthMethodCallExpr : 'geo.length' OPEN WS* commonExpr WS* CLOSE;
intersectsMethodCallExpr : 'geo.intersects' OPEN WS* commonExpr WS* COMMA WS* commonExpr WS* CLOSE;
distanceMethodCallExpr : GEO_DISTANCE_WORD OPEN WS* commonExpr WS* COMMA WS* commonExpr WS* CLOSE;
geoLengthMethodCallExpr : GEO_LENGTH_WORD OPEN WS* commonExpr WS* CLOSE;
intersectsMethodCallExpr : GEO_INTERSECTS_WORD OPEN WS* commonExpr WS* COMMA WS* commonExpr WS* CLOSE;
isofExpr : 'isof' OPEN WS* ( commonExpr WS* COMMA WS* )? qualifiedtypename WS* CLOSE;
castExpr : 'cast' OPEN WS* ( commonExpr WS* COMMA WS* )? qualifiedtypename WS* CLOSE;
isofExpr : ISOF_WORD WS* ( commonExpr WS* COMMA WS* )? qualifiedtypename WS* CLOSE;
castExpr : CAST_WORD WS* ( commonExpr WS* COMMA WS* )? qualifiedtypename WS* CLOSE;
//;------------------------------------------------------------------------------
//; 5. JSON format for function parameters
@ -316,7 +370,7 @@ castExpr : 'cast' OPEN WS* ( commonExpr WS* COMMA WS* )
//; Note: the query part of a URI needs to be partially percent-decoded before
//; applying these rules, see comment at the top of this file
//;------------------------------------------------------------------------------
/*
arrayOrObject : complexColInUri
| complexInUri
| rootExprCol
@ -375,11 +429,10 @@ primitive1LiteralInJSON : STRING_IN_JSON
;
number_in_json : INT | DECIMAL;
*/
//;------------------------------------------------------------------------------
//; 6. Names and identifiers
//;------------------------------------------------------------------------------
POINT : '.';
qualifiedtypename : PRIMITIVETYPENAME
| namespace odataIdentifier
@ -435,9 +488,11 @@ enumValue : singleEnumValue *( COMMA singleEnumValue );
singleEnumValue : odataIdentifier / INT;
geographyCollection : GEOGRAPHYPREFIX fullCollectionLiteral SQUOTE;
geographyCollection : GEOGRAPHY fullCollectionLiteral SQUOTE;
fullCollectionLiteral : sridLiteral collectionLiteral;
collectionLiteral : COLLECTION_CS OPEN geoLiteral ( COMMA geoLiteral )* CLOSE;
collectionLiteral : (COLLECTION | COLLECTION_FIX) OPEN geoLiteral ( COMMA geoLiteral )* CLOSE;
geoLiteral : collectionLiteral
| lineStringLiteral
| multipointLiteral
@ -446,45 +501,54 @@ geoLiteral : collectionLiteral
| pointLiteral
| polygonLiteral;
geographyLineString : GEOGRAPHYPREFIX fullLineStringLiteral SQUOTE;
geographyLineString : GEOGRAPHY fullLineStringLiteral SQUOTE;
fullLineStringLiteral : sridLiteral lineStringLiteral;
lineStringLiteral : LINESTRING_CS lineStringData;
lineStringLiteral : LINESTRING lineStringData;
lineStringData : OPEN positionLiteral ( COMMA positionLiteral )* CLOSE;
geographyMultilineString : GEOGRAPHYPREFIX fullMultilineStringLiteral SQUOTE;
geographyMultilineString : GEOGRAPHY fullMultilineStringLiteral SQUOTE;
fullMultilineStringLiteral : sridLiteral multilineStringLiteral;
multilineStringLiteral : MULTILINESTRING_CS OPEN ( lineStringData ( COMMA lineStringData )* )? CLOSE;
multilineStringLiteral : MULTILINESTRING OPEN ( lineStringData ( COMMA lineStringData )* )? CLOSE;
geographyMultipoint : GEOGRAPHYPREFIX fullMultipointLiteral SQUOTE;
geographyMultipoint : GEOGRAPHY fullMultipointLiteral SQUOTE;
fullMultipointLiteral : sridLiteral multipointLiteral;
multipointLiteral : MULTIPOINT_CS OPEN ( pointData ( COMMA pointData )* )? CLOSE ;
multipointLiteral : MULTIPOINT OPEN ( pointData ( COMMA pointData )* )? CLOSE ;
geographyMultipolygon : GEOGRAPHYPREFIX fullmultipolygonLiteral SQUOTE;
geographyMultipolygon : GEOGRAPHY fullmultipolygonLiteral SQUOTE;
fullmultipolygonLiteral : sridLiteral multipolygonLiteral;
multipolygonLiteral : MULTIPOLYGON_CS OPEN ( polygonData ( COMMA polygonData )* )? CLOSE;
multipolygonLiteral : MULTIPOLYGON OPEN ( polygonData ( COMMA polygonData )* )? CLOSE;
geographyPoint : GEOGRAPHYPREFIX fullpointLiteral SQUOTE;
geographyPoint : GEOGRAPHY fullpointLiteral SQUOTE;
fullpointLiteral : sridLiteral pointLiteral;
pointLiteral : POINT_CS pointData;
pointLiteral : GEO_POINT pointData;
pointData : OPEN positionLiteral CLOSE;
positionLiteral : (DECIMAL | INT ) WS (DECIMAL | INT ); //; longitude, then latitude
geographyPolygon : GEOGRAPHYPREFIX fullPolygonLiteral SQUOTE;
positionLiteral : (DECIMAL | INT ) WSP (DECIMAL | INT ); //; longitude, then latitude
geographyPolygon : GEOGRAPHY fullPolygonLiteral SQUOTE;
fullPolygonLiteral : sridLiteral polygonLiteral;
polygonLiteral : POLYGON_CS polygonData;
polygonLiteral : POLYGON polygonData;
polygonData : OPEN ringLiteral ( COMMA ringLiteral )* CLOSE;
ringLiteral : OPEN positionLiteral ( COMMA positionLiteral )* CLOSE;
geometryCollection : GEOMETRYPREFIX fullCollectionLiteral SQUOTE;
geometryLineString : GEOMETRYPREFIX fullLineStringLiteral SQUOTE;
geometryMultilineString : GEOMETRYPREFIX fullMultilineStringLiteral SQUOTE;
geometryMultipoint : GEOMETRYPREFIX fullMultipointLiteral SQUOTE;
geometryMultipolygon : GEOMETRYPREFIX fullmultipolygonLiteral SQUOTE;
geometryPoint : GEOMETRYPREFIX fullpointLiteral SQUOTE;
geometryPolygon : GEOMETRYPREFIX fullPolygonLiteral SQUOTE;
geometryCollection : GEOMETRY fullCollectionLiteral SQUOTE;
geometryLineString : GEOMETRY fullLineStringLiteral SQUOTE;
geometryMultilineString : GEOMETRY fullMultilineStringLiteral SQUOTE;
geometryMultipoint : GEOMETRY fullMultipointLiteral SQUOTE;
geometryMultipolygon : GEOMETRY fullmultipolygonLiteral SQUOTE;
geometryPoint : GEOMETRY fullpointLiteral SQUOTE;
geometryPolygon : GEOMETRY fullPolygonLiteral SQUOTE;
sridLiteral : SRID_CS EQ INT SEMI;
sridLiteral : SRID EQ INT SEMI;
/*
mode MODEd333gh;
MO12E1 : 'ASD' -> mode(DEFAULT_MODE);*/

View File

@ -38,17 +38,17 @@ import org.apache.olingo.producer.api.uri.KeyPredicate;
import org.apache.olingo.producer.api.uri.UriInfoKind;
import org.apache.olingo.producer.api.uri.UriPathInfoKind;
import org.apache.olingo.producer.core.uri.antlr.UriLexer;
import org.apache.olingo.producer.core.uri.antlr.UriParser;
import org.apache.olingo.producer.core.uri.antlr.UriParser.AllAltContext;
import org.apache.olingo.producer.core.uri.antlr.UriParser.BatchAltContext;
import org.apache.olingo.producer.core.uri.antlr.UriParser.CrossjoinAltContext;
import org.apache.olingo.producer.core.uri.antlr.UriParser.EntityAltContext;
import org.apache.olingo.producer.core.uri.antlr.UriParser.MetadataAltContext;
import org.apache.olingo.producer.core.uri.antlr.UriParser.OdataRelativeUriContext;
import org.apache.olingo.producer.core.uri.antlr.UriParser.PathSegmentContext;
import org.apache.olingo.producer.core.uri.antlr.UriParser.PathSegmentsAltContext;
import org.apache.olingo.producer.core.uri.antlr.UriParser.PathSegmentsContext;
import org.apache.olingo.producer.core.uri.antlr.UriParser.ResourcePathAltContext;
import org.apache.olingo.producer.core.uri.antlr.UriParserParser;
import org.apache.olingo.producer.core.uri.antlr.UriParserParser.AllAltContext;
import org.apache.olingo.producer.core.uri.antlr.UriParserParser.BatchAltContext;
import org.apache.olingo.producer.core.uri.antlr.UriParserParser.CrossjoinAltContext;
import org.apache.olingo.producer.core.uri.antlr.UriParserParser.EntityAltContext;
import org.apache.olingo.producer.core.uri.antlr.UriParserParser.MetadataAltContext;
import org.apache.olingo.producer.core.uri.antlr.UriParserParser.OdataRelativeUriContext;
import org.apache.olingo.producer.core.uri.antlr.UriParserParser.PathSegmentContext;
import org.apache.olingo.producer.core.uri.antlr.UriParserParser.PathSegmentsAltContext;
import org.apache.olingo.producer.core.uri.antlr.UriParserParser.PathSegmentsContext;
import org.apache.olingo.producer.core.uri.antlr.UriParserParser.ResourcePathAltContext;
public class UriParserImpl {
@ -72,9 +72,9 @@ public class UriParserImpl {
return new UriInfoImpl().setKind(UriInfoKind.batch);
} else if (root instanceof EntityAltContext) {
// TODO
// TODO implement
} else if (root instanceof MetadataAltContext) {
// TODO
// TODO implement
} else if (root instanceof ResourcePathAltContext) {
return readResourcePath(root);
@ -113,11 +113,11 @@ public class UriParserImpl {
}
private void readFirstPathSegment(UriInfoImpl uriInfo, PathSegmentContext ctx) {
/*if (ctx.ns != null) {//TODO
/*if (ctx.ns != null) {//TODO implement
// Error: First pathsegment can not be qualified. Allowed is entityset|function...
}*/
/*if (ctx.odi == null) {//TODO
/*if (ctx.odi == null) {//TODO implement
// Error: First pathsegment must contain an odata identifier
}*/
@ -262,7 +262,7 @@ public class UriParserImpl {
UriLexer lexer = new UriLexer(input);
CommonTokenStream tokens = new CommonTokenStream(lexer);
UriParser parser = new UriParser(tokens);
UriParserParser parser = new UriParserParser(tokens);
// parser.addErrorListener(new ErrorHandler());
// if (stage == 1) {

View File

@ -20,9 +20,6 @@ package org.apache.olingo.producer.core.testutil;
import java.util.Arrays;
import org.apache.olingo.commons.api.edm.EdmEntityContainer;
import org.apache.olingo.commons.api.edm.helper.EntityContainerInfo;
import org.apache.olingo.commons.api.edm.helper.FullQualifiedName;
import org.apache.olingo.commons.api.edm.provider.EdmProviderAdapter;
import org.apache.olingo.commons.api.edm.provider.EntitySet;
@ -32,7 +29,6 @@ import org.apache.olingo.commons.api.edm.provider.PropertyRef;
import org.apache.olingo.commons.api.exception.ODataException;
import org.apache.olingo.commons.api.exception.ODataNotImplementedException;
import org.apache.olingo.commons.core.edm.primitivetype.EdmPrimitiveTypeKind;
import org.apache.olingo.commons.core.edm.provider.EdmEntityContainerImpl;
public class EdmTechProvider extends EdmProviderAdapter {
@ -215,4 +211,6 @@ public class EdmTechProvider extends EdmProviderAdapter {
throw new ODataNotImplementedException();
}
}

View File

@ -38,21 +38,25 @@ import org.antlr.v4.runtime.Recognizer;
import org.antlr.v4.runtime.atn.ATNConfigSet;
import org.antlr.v4.runtime.atn.PredictionMode;
import org.antlr.v4.runtime.dfa.DFA;
import org.antlr.v4.runtime.misc.Interval;
import org.apache.olingo.producer.core.uri.antlr.UriLexer;
import org.apache.olingo.producer.core.uri.antlr.UriParser;
import org.apache.olingo.producer.core.uri.antlr.UriParser.OdataRelativeUriEOFContext;
import org.apache.olingo.producer.core.uri.antlr.UriParser.TestContext;
import org.apache.olingo.producer.core.uri.antlr.UriParserParser;
import org.apache.olingo.producer.core.uri.antlr.UriParserParser.OdataRelativeUriEOFContext;
import org.apache.olingo.producer.core.uri.antlr.UriParserParser.TestContext;
public class ParserValidator {
private List<Exception> exceptions = new ArrayList<Exception>();
private ParserRuleContext root;
private String input = null;
private int exceptionOnStage = -1;
//private int exceptionOnStage = -1;
private Exception curException = null;
private Exception curWeakException = null;
//private Exception curWeakException = null;
private boolean allowFullContext;
private boolean allowContextSensitifity;
private boolean allowAmbiguity;
private int logLevel = 0;
private int lexerLog;
public ParserValidator run(String uri) {
return run(uri, false);
@ -64,9 +68,24 @@ public class ParserValidator {
public ParserValidator run(String uri, boolean searchMode) {
input = uri;
if (lexerLog> 0) {
(new TokenValidator()).log(lexerLog).run(input);
}
root = parseInput(uri, searchMode);
if (logLevel > 0) {
System.out.println(ParseTreeSerializer.getTreeAsText(root, new UriParserParser(null).getRuleNames()));
}
// reset for nest test
allowFullContext = false;
allowContextSensitifity = false;
allowAmbiguity = false;
exFirst();
return this;
@ -75,13 +94,27 @@ public class ParserValidator {
public ParserValidator runTest(String uri, boolean searchMode) {
input = uri;
root = parseInputTest(uri, searchMode);
if (logLevel > 0) {
System.out.println(ParseTreeSerializer.getTreeAsText(root, new UriParserParser(null).getRuleNames()));
}
// reset for nest test
allowFullContext = false;
allowContextSensitifity = false;
allowAmbiguity = false;
exFirst();
return this;
}
public ParserValidator log(int logLevel) {
this.logLevel = logLevel;
return this;
}
public ParserValidator aFC() {
allowFullContext = true;
return this;
@ -92,11 +125,16 @@ public class ParserValidator {
return this;
}
public ParserValidator aAM() {
allowAmbiguity = true;
return this;
}
public ParserValidator isText(String expected) {
assertEquals(null, curException);
assertEquals(0, exceptions.size());
String text = ParseTreeSerializer.getTreeAsText(root, new UriParser(null).getRuleNames());
String text = ParseTreeSerializer.getTreeAsText(root, new UriParserParser(null).getRuleNames());
assertEquals(expected, text);
return this;
}
@ -107,7 +145,7 @@ public class ParserValidator {
}
private OdataRelativeUriEOFContext parseInput(final String input, boolean searchMode) {
UriParser parser = null;
UriParserParser parser = null;
OdataRelativeUriEOFContext ret = null;
// Use 2 stage approach to improve performance
@ -124,7 +162,7 @@ public class ParserValidator {
ret = parser.odataRelativeUriEOF();
} catch (Exception ex) {
curException = ex;
exceptionOnStage = 1;
//exceptionOnStage = 1;
// stage= 2
try {
curException = null;
@ -135,7 +173,7 @@ public class ParserValidator {
ret = parser.odataRelativeUriEOF();
} catch (Exception ex1) {
curException = ex1;
exceptionOnStage = 2;
//exceptionOnStage = 2;
}
}
@ -143,7 +181,7 @@ public class ParserValidator {
}
private TestContext parseInputTest(final String input, boolean searchMode) {
UriParser parser = null;
UriParserParser parser = null;
TestContext ret = null;
// Use 2 stage approach to improve performance
@ -160,7 +198,7 @@ public class ParserValidator {
ret = parser.test();
} catch (Exception ex) {
curException = ex;
exceptionOnStage = 1;
//exceptionOnStage = 1;
// stage= 2
try {
curException = null;
@ -171,25 +209,28 @@ public class ParserValidator {
ret = parser.test();
} catch (Exception ex1) {
curException = ex1;
exceptionOnStage = 2;
//exceptionOnStage = 2;
}
}
return ret;
}
private UriParser getNewParser(final String input, boolean searchMode) {
private UriParserParser getNewParser(final String input, boolean searchMode) {
ANTLRInputStream inputStream = new ANTLRInputStream(input);
// UriLexer lexer = new UriLexer(inputStream);
UriLexer lexer = new UriLexer(inputStream);
lexer.setInSearch(searchMode);
//lexer.setInSearch(searchMode);
// lexer.removeErrorListeners();
lexer.addErrorListener(new ErrorCollector(this));
CommonTokenStream tokens = new CommonTokenStream(lexer);
UriParser parser = new UriParser(tokens);
parser.addErrorListener(new TraceErrorHandler());
UriParserParser parser = new UriParserParser(tokens);
if ((lexerLog >0 ) || (logLevel > 0)) {
parser.addParseListener(new TokenWriter());
}
parser.addErrorListener(new TraceErrorHandler<Object>());
parser.addErrorListener(new ErrorCollector(this));
return parser;
@ -202,6 +243,67 @@ public class ParserValidator {
this.tokenValidator = tokenValidator;
}
@Override
public void syntaxError(Recognizer<?, ?> recognizer, Object offendingSymbol, int line, int charPositionInLine,
String msg, RecognitionException e) {
tokenValidator.exceptions.add(e);
trace(recognizer, offendingSymbol, line, charPositionInLine, msg, e);
fail("syntaxError"); // don't fail here we want to the error message at the caller
}
@Override
public void reportAmbiguity(Parser recognizer, DFA dfa, int startIndex, int stopIndex, boolean exact,
BitSet ambigAlts, ATNConfigSet configs) {
if (!tokenValidator.allowAmbiguity) {
System.out.println("reportAmbiguity " +
ambigAlts + ":" + configs +
", input=" + recognizer.getTokenStream().getText(Interval.of(startIndex, stopIndex)));
printStack(recognizer);
fail("reportAmbiguity");
} else {
System.out.println("allowed Ambiguity " +
ambigAlts + ":" + configs +
", input=" + recognizer.getTokenStream().getText(Interval.of(startIndex, stopIndex)));
}
}
@Override
public void reportAttemptingFullContext(Parser recognizer, DFA dfa, int startIndex, int stopIndex,
BitSet conflictingAlts, ATNConfigSet configs) {
// The grammar should be written in order to avoid attempting a full context parse because its negative
// impact on the performance, so trace and stop here
if (!tokenValidator.allowFullContext) {
printStack(recognizer);
fail("reportAttemptingFullContext");
} else {
System.out.println("allowed AttemptingFullContext");
}
}
@Override
public void reportContextSensitivity(Parser recognizer, DFA dfa, int startIndex, int stopIndex, int prediction,
ATNConfigSet configs) {
if (!tokenValidator.allowContextSensitifity) {
printStack(recognizer);
fail("reportContextSensitivity");
} else {
System.out.println("allowed ContextSensitivity");
}
}
private void printStack(Parser recognizer) {
List<String> stack = ((Parser) recognizer).getRuleInvocationStack();
Collections.reverse(stack);
System.out.println("rule stack: " + stack);
}
public void trace(final Recognizer<?, ?> recognizer, final Object offendingSymbol,
final int line, final int charPositionInLine, final String msg, final RecognitionException e) {
System.err.println("-");
@ -225,76 +327,36 @@ public class ParserValidator {
}
}
@Override
public void syntaxError(Recognizer<?, ?> recognizer, Object offendingSymbol, int line, int charPositionInLine,
String msg, RecognitionException e) {
tokenValidator.exceptions.add(e);
trace(recognizer, offendingSymbol, line, charPositionInLine, msg, e);
fail("syntaxError"); // don't fail here we want to the error message at the caller
}
@Override
public void reportAmbiguity(Parser recognizer, DFA dfa, int startIndex, int stopIndex, boolean exact,
BitSet ambigAlts, ATNConfigSet configs) {
printStack(recognizer);
fail("reportAmbiguity");
}
@Override
public void reportAttemptingFullContext(Parser recognizer, DFA dfa, int startIndex, int stopIndex,
BitSet conflictingAlts, ATNConfigSet configs) {
// The grammar should be written in order to avoid attempting a full context parse because its negative
// impact on the performance, so trace and stop here
if (!tokenValidator.allowFullContext) {
printStack(recognizer);
fail("reportAttemptingFullContext");
}
}
private void printStack(Parser recognizer) {
List<String> stack = ((Parser) recognizer).getRuleInvocationStack();
Collections.reverse(stack);
System.err.println("rule stack: " + stack);
}
@Override
public void reportContextSensitivity(Parser recognizer, DFA dfa, int startIndex, int stopIndex, int prediction,
ATNConfigSet configs) {
if (!tokenValidator.allowContextSensitifity) {
printStack(recognizer);
fail("reportContextSensitivity");
}
}
}
public ParserValidator exFirst() {
try {
curWeakException = exceptions.get(0);
//curWeakException = exceptions.get(0);
} catch (IndexOutOfBoundsException ex) {
curWeakException = null;
//curWeakException = null;
}
return this;
}
public ParserValidator exLast() {
curWeakException = exceptions.get(exceptions.size() - 1);
//curWeakException = exceptions.get(exceptions.size() - 1);
return this;
}
public ParserValidator exAt(int index) {
try {
curWeakException = exceptions.get(index);
//curWeakException = exceptions.get(index);
} catch (IndexOutOfBoundsException ex) {
curWeakException = null;
//curWeakException = null;
}
return this;
}
public ParserValidator lexerlog(int i) {
this.lexerLog = i;
return this;
}
}

View File

@ -27,6 +27,7 @@ import java.util.List;
import org.antlr.v4.runtime.ANTLRErrorListener;
import org.antlr.v4.runtime.ANTLRInputStream;
import org.antlr.v4.runtime.CharStream;
import org.antlr.v4.runtime.Parser;
import org.antlr.v4.runtime.RecognitionException;
import org.antlr.v4.runtime.Recognizer;
@ -41,16 +42,14 @@ public class TokenValidator {
private Token curToken = null;
private Exception curException = null;
private String input = null;
int logLevel = 0;
private int logLevel = 0;
private int mode;
public TokenValidator run(String uri) {
return run(uri, false);
}
public TokenValidator run(String uri, boolean searchMode) {
input = uri;
exceptions.clear();
tokens = parseInput(uri, searchMode);
tokens = parseInput(uri);
if (logLevel > 0) {
showTokens();
}
@ -92,6 +91,26 @@ public class TokenValidator {
return this;
}
public TokenValidator isAllText(String expected) {
String tmp = "";
for (Token curToken : tokens) {
tmp += curToken.getText();
}
assertEquals(expected, tmp);
return this;
}
public TokenValidator isAllInput() {
String tmp = "";
for (Token curToken : tokens) {
tmp += curToken.getText();
}
assertEquals(input, tmp);
return this;
}
public TokenValidator isInput() {
assertEquals(input, curToken.getText());
return this;
@ -108,11 +127,11 @@ public class TokenValidator {
return this;
}
private List<? extends Token> parseInput(final String input, boolean searchMode) {
private List<? extends Token> parseInput(final String input) {
ANTLRInputStream inputStream = new ANTLRInputStream(input);
UriLexer lexer = new UriLexer(inputStream);
lexer.setInSearch(searchMode);
UriLexer lexer = new TestUriLexer(this,inputStream, mode);
// lexer.setInSearch(searchMode);
// lexer.removeErrorListeners();
lexer.addErrorListener(new ErrorCollector(this));
return lexer.getAllTokens();
@ -165,6 +184,37 @@ public class TokenValidator {
return this;
}
private static class TestUriLexer extends UriLexer {
private TokenValidator validator;
public TestUriLexer(TokenValidator validator, CharStream input, int mode) {
super(input);
super.mode(mode);
this.validator = validator;
}
@Override
public void pushMode(int m) {
if (validator.logLevel > 0) {
System.out.println("OnMode" + ": " + UriLexer.modeNames[m]);
}
super.pushMode(m);
}
@Override
public int popMode() {
int m = super.popMode();
if (validator.logLevel > 0) {
System.out.println("OnMode" + ": " + UriLexer.modeNames[m]);
}
return m;
}
}
private static class ErrorCollector implements ANTLRErrorListener {
TokenValidator tokenValidator;
@ -198,4 +248,8 @@ public class TokenValidator {
}
public void globalMode(int mode) {
this.mode = mode;
}
}

View File

@ -0,0 +1,54 @@
/*******************************************************************************
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
******************************************************************************/
package org.apache.olingo.producer.core.testutil;
import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.tree.ErrorNode;
import org.antlr.v4.runtime.tree.ParseTreeListener;
import org.antlr.v4.runtime.tree.TerminalNode;
import org.apache.olingo.producer.core.uri.antlr.UriLexer;
public class TokenWriter implements ParseTreeListener {
@Override
public void visitTerminal(TerminalNode node) {
String out = String.format("%1$-" + 20 + "s", node.getText()); ;
out += UriLexer.tokenNames[node.getSymbol().getType()];
System.out.println(out);
}
@Override
public void visitErrorNode(ErrorNode node) {
// TODO Auto-generated method stub
}
@Override
public void enterEveryRule(ParserRuleContext ctx) {
// TODO Auto-generated method stub
}
@Override
public void exitEveryRule(ParserRuleContext ctx) {
// TODO Auto-generated method stub
}
}

View File

@ -58,7 +58,7 @@ public class UriResourcePathValidator {
}
public UriResourcePathValidator last() {
// TODO
// TODO implement
// uriPathInfo = uriInfo.getLastUriPathInfo();
return this;
}

View File

@ -18,7 +18,6 @@
******************************************************************************/
package org.apache.olingo.producer.core.uri.antlr;
import org.antlr.v4.runtime.LexerNoViableAltException;
import org.apache.olingo.producer.core.testutil.TokenValidator;
import org.junit.Test;
@ -56,17 +55,32 @@ public class TestLexer {
test = new TokenValidator();
}
@Test
// @Test
public void test() {
test.run("$batch");
}
test.globalMode(UriLexer.MODE_QUERY);
// test.log(1).run("$filter='ABC'").isText("ABC").isType(UriLexer.STRING);
// test.log(1).run("ODI?$filter=1 add 2 mul 3");
// test.log(1).run("1 + 2 + 3");
// test.log(1).run("ODI?$filter=geography'SRID=0;Collection(LineString(142.1 64.1,3.14 2.78))'");
// test.log(1).run("ODI?$filter=1 mul 2 add 3");
// test.globalMode(UriLexer.MODE_QUERY);
// test.log(1).run("$entity?$id='A'&sdf=ssdf&$id=ABC");
// test.run("$skiptoken=top");
/*
* test.log(1).run("ODI?$filter=geo.intersects("
* + "geometry'SRID=0;Point(142.1 64.1)',"
* + "geometry'SRID=0;Polygon((1 1,1 1),(1 1,2 2,3 3,1 1))')");
*/
}
// ;------------------------------------------------------------------------------
// ; 0. URI
// ;------------------------------------------------------------------------------
@Test
public void testUriTokens() {
test.globalMode(UriLexer.MODE_QUERY);
test.run("#").isText("#").isType(UriLexer.FRAGMENT);
test.run("$count").isText("$count").isType(UriLexer.COUNT);
test.run("$ref").isText("$ref").isType(UriLexer.REF);
@ -78,38 +92,44 @@ public class TestLexer {
// ;------------------------------------------------------------------------------
@Test
public void testQueryOptionsTokens() {
test.run("$skip=1").isText("$skip=1").isType(UriLexer.SKIP);
test.run("$skip=2").isText("$skip=2").isType(UriLexer.SKIP);
test.run("$skip=123").isText("$skip=123").isType(UriLexer.SKIP);
test.run("$skip=A").isExType(LexerNoViableAltException.class);
test.run("$top=1").isText("$top=1").isType(UriLexer.TOP);
test.run("$top=2").isText("$top=2").isType(UriLexer.TOP);
test.run("$top=123").isText("$top=123").isType(UriLexer.TOP);
test.run("$top=A").isExType(LexerNoViableAltException.class);
test.globalMode(UriLexer.MODE_QUERY);
test.run("$skip=1").isAllText("$skip=1").isType(UriLexer.SKIP);
test.run("$skip=2").isAllText("$skip=2").isType(UriLexer.SKIP);
test.run("$skip=123").isAllText("$skip=123").isType(UriLexer.SKIP);
test.run("$levels=1").isText("$levels=1").isType(UriLexer.LEVELS);
test.run("$levels=2").isText("$levels=2").isType(UriLexer.LEVELS);
test.run("$levels=123").isText("$levels=123").isType(UriLexer.LEVELS);
test.run("$levels=max").isText("$levels=max").isType(UriLexer.LEVELS);
test.run("$levels=A").isExType(LexerNoViableAltException.class);
// TODO check and add error handling
// test.run("$skip=A").isExType(LexerNoViableAltException.class);
test.run("$format=atom").isText("$format=atom").isType(UriLexer.FORMAT);
test.run("$format=json").isText("$format=json").isType(UriLexer.FORMAT);
test.run("$format=xml").isText("$format=xml").isType(UriLexer.FORMAT);
test.run("$format=abc/def").isText("$format=abc/def").isType(UriLexer.FORMAT);
test.run("$format=abc").isExType(LexerNoViableAltException.class);
test.run("$top=1").isAllText("$top=1").isType(UriLexer.TOP);
test.run("$top=2").isAllText("$top=2").isType(UriLexer.TOP);
test.run("$top=123").isAllText("$top=123").isType(UriLexer.TOP);
// TODO check and add error handling
// test.run("$top=A").isExType(LexerNoViableAltException.class);
test.run("$id=123").isText("$id=123").isType(UriLexer.ID);
test.run("$id=ABC").isText("$id=ABC").isType(UriLexer.ID);
test.run("$levels=1").isAllText("$levels=1").isType(UriLexer.LEVELS);
test.run("$levels=2").isAllText("$levels=2").isType(UriLexer.LEVELS);
test.run("$levels=123").isAllText("$levels=123").isType(UriLexer.LEVELS);
test.run("$levels=max").isAllText("$levels=max").isType(UriLexer.LEVELS);
// TODO check and add error handling
// test.run("$levels=A").isExType(LexerNoViableAltException.class);
test.run("$skiptoken=ABC").isText("$skiptoken=ABC").isType(UriLexer.SKIPTOKEN);
test.run("$skiptoken=ABC").isText("$skiptoken=ABC").isType(UriLexer.SKIPTOKEN);
test.run("$format=atom").isAllText("$format=atom").isType(UriLexer.FORMAT);
test.run("$format=json").isAllText("$format=json").isType(UriLexer.FORMAT);
test.run("$format=xml").isAllText("$format=xml").isType(UriLexer.FORMAT);
test.run("$format=abc/def").isAllText("$format=abc/def").isType(UriLexer.FORMAT);
// TODO check and add error handling
// test.run("$format=abc").isExType(LexerNoViableAltException.class);
test.run("\"ABC\"", true).isText("\"ABC\"").isType(UriLexer.SEARCHPHRASE);
test.run("$id=123").isAllText("$id=123").isType(UriLexer.ID);
test.run("$id=ABC").isAllText("$id=ABC").isType(UriLexer.ID);
test.run("$id=" + cQCHAR_NO_AMP + "", true).isInput().isType(UriLexer.ID);
test.run("$skiptoken=ABC").isAllText("$skiptoken=ABC").isType(UriLexer.SKIPTOKEN);
test.run("$skiptoken=ABC").isAllText("$skiptoken=ABC").isType(UriLexer.SKIPTOKEN);
test.run("$search=\"ABC\"").isAllText("$search=\"ABC\"").isType(UriLexer.SEARCH);
test.run("$search=ABC").isAllText("$search=ABC").isType(UriLexer.SEARCH);
test.run("$search=\"A%20B%20C\"").isAllText("$search=\"A%20B%20C\"").isType(UriLexer.SEARCH);
}
// ;------------------------------------------------------------------------------
@ -117,126 +137,135 @@ public class TestLexer {
// ;------------------------------------------------------------------------------
@Test
public void testQueryExpressions() {
test.globalMode(UriLexer.MODE_SYSTEM_QUERY);
// assertEquals("expected","actual");
test.run("$it").isText("$it").isType(UriLexer.IMPLICIT_VARIABLE_EXPR);
test.run("$itabc").isText("$it").isType(UriLexer.IMPLICIT_VARIABLE_EXPR);
// TODO check and add error handling
// test.run("$itabc").isText("$it").isType(UriLexer.IMPLICIT_VARIABLE_EXPR);
test.run("contains").isText("contains").isType(UriLexer.CONTAINS);
test.run("$filter=contains(").at(2).isText("contains(").isType(UriLexer.CONTAINS_WORD);
test.run("containsabc").isText("containsabc").isType(UriLexer.ODATAIDENTIFIER); // test that this is a ODI
test.run("$filter=containsabc").at(2).isText("containsabc")
.isType(UriLexer.ODATAIDENTIFIER); // test that this is a ODI
test.run("startswith").isText("startswith").isType(UriLexer.STARTSWITH);
test.run("endswith").isText("endswith").isType(UriLexer.ENDSWITH);
test.run("length").isText("length").isType(UriLexer.LENGTH);
test.run("indexof").isText("indexof").isType(UriLexer.INDEXOF);
test.run("substring").isText("substring").isType(UriLexer.SUBSTRING);
test.run("tolower").isText("tolower").isType(UriLexer.TOLOWER);
test.run("toupper").isText("toupper").isType(UriLexer.TOUPPER);
test.run("trim").isText("trim").isType(UriLexer.TRIM);
test.run("concat").isText("concat").isType(UriLexer.CONCAT);
test.run("$filter=startswith(").at(2).isText("startswith(").isType(UriLexer.STARTSWITH_WORD);
test.run("$filter=endswith(").at(2).isText("endswith(").isType(UriLexer.ENDSWITH_WORD);
test.run("$filter=length(").at(2).isText("length(").isType(UriLexer.LENGTH_WORD);
test.run("$filter=indexof(").at(2).isText("indexof(").isType(UriLexer.INDEXOF_WORD);
test.run("$filter=substring(").at(2).isText("substring(").isType(UriLexer.SUBSTRING_WORD);
test.run("$filter=tolower(").at(2).isText("tolower(").isType(UriLexer.TOLOWER_WORD);
test.run("$filter=toupper(").at(2).isText("toupper(").isType(UriLexer.TOUPPER_WORD);
test.run("$filter=trim(").at(2).isText("trim(").isType(UriLexer.TRIM_WORD);
test.run("$filter=concat(").at(2).isText("concat(").isType(UriLexer.CONCAT_WORD);
}
// ;------------------------------------------------------------------------------
// ; 5. JSON format for function parameters
// ;------------------------------------------------------------------------------
@Test
public void testQueryJSON_and_otheres() {
// QUOTATION_MARK
test.run("\"").isText("\"").isType(UriLexer.QUOTATION_MARK);
test.run("%22").isText("%22").isType(UriLexer.QUOTATION_MARK);
// Lexer rule QCHAR_UNESCAPED
test.run("\"abc\"").isText("\"abc\"").isType(UriLexer.STRING_IN_JSON);
// Lexer rule QCHAR_JSON_SPECIAL
test.run("\"" + cQCHAR_JSON_SPECIAL + "\"").isInput().isType(UriLexer.STRING_IN_JSON);
// Lexer rule CHAR_IN_JSON
test.run("\"" + cQCHAR_UNESCAPED + "\"").isInput().isType(UriLexer.STRING_IN_JSON);
test.run("\"" + cQCHAR_JSON_SPECIAL + "\"").isInput().isType(UriLexer.STRING_IN_JSON);
// Lexer rule ESCAPE CHAR_IN_JSON
test.run("\"" + cESCAPE + cQUOTATION_MARK + "\"").isInput().isType(UriLexer.STRING_IN_JSON);
test.run("\"" + cESCAPE + cESCAPE + "\"").isInput().isType(UriLexer.STRING_IN_JSON);
test.run("\"" + cESCAPE + "/" + "\"").isInput().isType(UriLexer.STRING_IN_JSON);
test.run("\"" + cESCAPE + "%2F" + "\"").isInput().isType(UriLexer.STRING_IN_JSON);
test.run("\"" + cESCAPE + "b" + "\"").isInput().isType(UriLexer.STRING_IN_JSON);
test.run("\"" + cESCAPE + "f" + "\"").isInput().isType(UriLexer.STRING_IN_JSON);
test.run("\"" + cESCAPE + "n" + "\"").isInput().isType(UriLexer.STRING_IN_JSON);
test.run("\"" + cESCAPE + "r" + "\"").isInput().isType(UriLexer.STRING_IN_JSON);
test.run("\"" + cESCAPE + "t" + "\"").isInput().isType(UriLexer.STRING_IN_JSON);
test.run("\"" + cESCAPE + "u12AB" + "\"").isInput().isType(UriLexer.STRING_IN_JSON);
}
// ;------------------------------------------------------------------------------
// ; 6. Names and identifiers
// ;------------------------------------------------------------------------------
@Test
public void testNamesAndIdentifiers() {
test.run("Binary").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run("Boolean").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run("Byte").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run("Date").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run("DateTimeOffset").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run("Decimal").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run("Double").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run("Duration").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run("Guid").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run("Int16").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run("Int32").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run("Int64").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run("SByte").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run("Single").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run("Stream").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run("String").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run("TimeOfDay").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run("Geography").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run("Geometry").isInput().isType(UriLexer.PRIMITIVETYPENAME);
String g = "Geography";
test.run(g ).isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run(g + "Collection").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run(g + "LineString").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run(g + "MultiLineString").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run(g + "MultiPoint").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run(g + "MultiPolygon").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run(g + "Point").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run(g + "LineString").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run(g + "Polygon").isInput().isType(UriLexer.PRIMITIVETYPENAME);
g = "Geometry";
test.run(g ).isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run(g + "Collection").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run(g + "LineString").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run(g + "MultiLineString").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run(g + "MultiPoint").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run(g + "MultiPolygon").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run(g + "Point").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run(g + "LineString").isInput().isType(UriLexer.PRIMITIVETYPENAME);
test.run(g + "Polygon").isInput().isType(UriLexer.PRIMITIVETYPENAME);
}
@Test
public void testNameClaches() {
/* test.run("Collection").isInput().isType(UriLexer.COLLECTION_CS_FIX);
test.run("LineString").isInput().isType(UriLexer.LINESTRING_CS_FIX);
test.run("MultiLineString").isInput().isType(UriLexer.MULTILINESTRING_CS_FIX);
test.run("MultiPoint").isInput().isType(UriLexer.MULTIPOINT_CS_FIX);
test.run("MultiPolygon").isInput().isType(UriLexer.MULTIPOLYGON_CS_FIX);
test.run("Point").isInput().isType(UriLexer.POINT_CS_FIX);
test.run("Polygon").isInput().isType(UriLexer.POLYGON_CS_FIX);
test.run("Srid").isInput().isType(UriLexer.SRID_CS);*/
}
/*
* // ;------------------------------------------------------------------------------
* // ; 5. JSON format for function parameters
* // ;------------------------------------------------------------------------------
*
* @Test
* public void testQueryJSON_and_otheres() {
* // QUOTATION_MARK
* test.run("\"").isText("\"").isType(UriLexer.QUOTATION_MARK);
* test.run("%22").isText("%22").isType(UriLexer.QUOTATION_MARK);
*
* // Lexer rule QCHAR_UNESCAPED
* test.run("\"abc\"").isText("\"abc\"").isType(UriLexer.STRING_IN_JSON);
*
* // Lexer rule QCHAR_JSON_SPECIAL
* test.run("\"" + cQCHAR_JSON_SPECIAL + "\"").isInput().isType(UriLexer.STRING_IN_JSON);
*
* // Lexer rule CHAR_IN_JSON
* test.run("\"" + cQCHAR_UNESCAPED + "\"").isInput().isType(UriLexer.STRING_IN_JSON);
* test.run("\"" + cQCHAR_JSON_SPECIAL + "\"").isInput().isType(UriLexer.STRING_IN_JSON);
*
* // Lexer rule ESCAPE CHAR_IN_JSON
* test.run("\"" + cESCAPE + cQUOTATION_MARK + "\"").isInput().isType(UriLexer.STRING_IN_JSON);
* test.run("\"" + cESCAPE + cESCAPE + "\"").isInput().isType(UriLexer.STRING_IN_JSON);
* test.run("\"" + cESCAPE + "/" + "\"").isInput().isType(UriLexer.STRING_IN_JSON);
* test.run("\"" + cESCAPE + "%2F" + "\"").isInput().isType(UriLexer.STRING_IN_JSON);
* test.run("\"" + cESCAPE + "b" + "\"").isInput().isType(UriLexer.STRING_IN_JSON);
* test.run("\"" + cESCAPE + "f" + "\"").isInput().isType(UriLexer.STRING_IN_JSON);
* test.run("\"" + cESCAPE + "n" + "\"").isInput().isType(UriLexer.STRING_IN_JSON);
* test.run("\"" + cESCAPE + "r" + "\"").isInput().isType(UriLexer.STRING_IN_JSON);
* test.run("\"" + cESCAPE + "t" + "\"").isInput().isType(UriLexer.STRING_IN_JSON);
* test.run("\"" + cESCAPE + "u12AB" + "\"").isInput().isType(UriLexer.STRING_IN_JSON);
* }
*
* // ;------------------------------------------------------------------------------
* // ; 6. Names and identifiers
* // ;------------------------------------------------------------------------------
*
* @Test
* public void testNamesAndIdentifiers() {
*
* test.run("Binary").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run("Boolean").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run("Byte").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run("Date").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run("DateTimeOffset").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run("Decimal").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run("Double").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run("Duration").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run("Guid").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run("Int16").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run("Int32").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run("Int64").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run("SByte").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run("Single").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run("Stream").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run("String").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run("TimeOfDay").isInput().isType(UriLexer.PRIMITIVETYPENAME);
*
* test.run("Geography").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run("Geometry").isInput().isType(UriLexer.PRIMITIVETYPENAME);
*
* String g = "Geography";
* test.run(g ).isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run(g + "Collection").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run(g + "LineString").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run(g + "MultiLineString").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run(g + "MultiPoint").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run(g + "MultiPolygon").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run(g + "Point").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run(g + "LineString").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run(g + "Polygon").isInput().isType(UriLexer.PRIMITIVETYPENAME);
*
* g = "Geometry";
* test.run(g ).isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run(g + "Collection").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run(g + "LineString").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run(g + "MultiLineString").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run(g + "MultiPoint").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run(g + "MultiPolygon").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run(g + "Point").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run(g + "LineString").isInput().isType(UriLexer.PRIMITIVETYPENAME);
* test.run(g + "Polygon").isInput().isType(UriLexer.PRIMITIVETYPENAME);
*
* }
*
* @Test
* public void testNameClaches() {
* -- test.run("Collection").isInput().isType(UriLexer.COLLECTION_CS_FIX);
* test.run("LineString").isInput().isType(UriLexer.LINESTRING_CS_FIX);
* test.run("MultiLineString").isInput().isType(UriLexer.MULTILINESTRING_CS_FIX);
* test.run("MultiPoint").isInput().isType(UriLexer.MULTIPOINT_CS_FIX);
* test.run("MultiPolygon").isInput().isType(UriLexer.MULTIPOLYGON_CS_FIX);
* test.run("Point").isInput().isType(UriLexer.POINT_CS_FIX);
* test.run("Polygon").isInput().isType(UriLexer.POLYGON_CS_FIX);
* test.run("Srid").isInput().isType(UriLexer.SRID_CS);--
* }
*/
// ;------------------------------------------------------------------------------
// ; 7. Literal Data Values
// ;------------------------------------------------------------------------------
@Test
public void testLiteralDataValues() {
test.globalMode(UriLexer.MODE_SYSTEM_QUERY);
// null
test.run("null").isInput().isType(UriLexer.NULLVALUE);
@ -246,7 +275,7 @@ public class TestLexer {
test.run("binary'ABCD'").isInput().isType(UriLexer.BINARY);
test.run("BiNaRy'ABCD'").isInput().isType(UriLexer.BINARY);
// not a binary
// not a binary TODO add error handling
test.run("x'ABCDA'")
.at(0).isText("x").isType(UriLexer.ODATAIDENTIFIER)
.at(1).isText("'ABCDA'").isType(UriLexer.STRING);
@ -267,6 +296,7 @@ public class TestLexer {
test.run("+123456789").isInput().isType(UriLexer.INT);
test.run("-123").isInput().isType(UriLexer.INT);
test.run("-123456789").isInput().isType(UriLexer.INT);
// Lexer rule DECIMAL
test.run("0.1").isInput().isType(UriLexer.DECIMAL);
test.run("1.1").isInput().isType(UriLexer.DECIMAL);
@ -274,6 +304,7 @@ public class TestLexer {
test.run("+1.1").isInput().isType(UriLexer.DECIMAL);
test.run("-0.1").isInput().isType(UriLexer.DECIMAL);
test.run("-1.1").isInput().isType(UriLexer.DECIMAL);
// Lexer rule EXP
test.run("1.1e+1").isInput().isType(UriLexer.DECIMAL);
test.run("1.1e-1").isInput().isType(UriLexer.DECIMAL);
@ -283,7 +314,6 @@ public class TestLexer {
test.run("INF").isInput().isType(UriLexer.NANINFINITY);
// Lexer rule GUID
test.run("guid'1234ABCD-12AB-23CD-45EF-123456780ABC'").isInput().isType(UriLexer.GUID);
test.run("GuId'1234ABCD-12AB-23CD-45EF-123456780ABC'").isInput().isType(UriLexer.GUID);
@ -337,74 +367,72 @@ public class TestLexer {
test.run("timeofday'20:15:01.02'").isInput().isType(UriLexer.TIMEOFDAY);
test.run("timeofday'20:15:01.02'").isInput().isType(UriLexer.TIMEOFDAY);
//String
test.run("'ABC'").isInput().isType(UriLexer.STRING);
// String
test.run("'ABC'").isText("'ABC'").isType(UriLexer.STRING);
test.run("'A%20C'").isInput().isType(UriLexer.STRING);
test.run("'%20%20%20ABC'").isInput().isType(UriLexer.STRING);
}
// ;------------------------------------------------------------------------------
// ; 0. misc
// ;------------------------------------------------------------------------------
/*
* // ;------------------------------------------------------------------------------
* // ; 0. misc
* // ;------------------------------------------------------------------------------
*
* @Test
* public void testCriticalOrder() {
* // Test lexer rule STRING
* test.run("'abc'").isInput().isType(UriLexer.STRING);
*
* // Test lexer rule SEARCHWORD
* test.run("abc", true).isInput().isType(UriLexer.SEARCHWORD);
*
* // Test lexer rule SEARCHPHRASE
* test.run("\"abc\"", true).isInput().isType(UriLexer.SEARCHPHRASE);
*
* // Test lexer rule ODATAIDENTIFIER
* test.run("abc").isInput().isType(UriLexer.ODATAIDENTIFIER);
*
* test.run("\"abc\"").isInput().isType(UriLexer.STRING_IN_JSON);
* }
*/
@Test
public void testCriticalOrder() {
// Test lexer rule STRING
test.run("'abc'").isInput().isType(UriLexer.STRING);
// Test lexer rule SEARCHWORD
test.run("abc", true).isInput().isType(UriLexer.SEARCHWORD);
// Test lexer rule SEARCHPHRASE
test.run("\"abc\"", true).isInput().isType(UriLexer.SEARCHPHRASE);
// Test lexer rule ODATAIDENTIFIER
test.run("abc").isInput().isType(UriLexer.ODATAIDENTIFIER);
test.run("\"abc\"").isInput().isType(UriLexer.STRING_IN_JSON);
}
//@Test
public void testDelims() {
String reserved = "/";
test.globalMode(UriLexer.MODE_QUERY);
// Test lexer rule UNRESERVED
test.run("$format=A/" + cUNRESERVED).isInput().isType(UriLexer.FORMAT);
test.run("$format=A/" + cUNRESERVED + reserved).isText("$format=A/" + cUNRESERVED).isType(UriLexer.FORMAT);
test.run("$format=A/" + cUNRESERVED).isAllInput().isType(UriLexer.FORMAT);
test.run("$format=A/" + cUNRESERVED + reserved).isType(UriLexer.FORMAT).at(4).isText(cUNRESERVED);
// Test lexer rule PCT_ENCODED
test.run("$format=A/" + cPCT_ENCODED).isInput().isType(UriLexer.FORMAT);
test.run("$format=A/" + cPCT_ENCODED + reserved).isText("$format=A/" + cPCT_ENCODED).isType(UriLexer.FORMAT);
test.run("$format=A/" + cPCT_ENCODED).isAllInput().isType(UriLexer.FORMAT);
test.run("$format=A/" + cPCT_ENCODED + reserved).isType(UriLexer.FORMAT).at(4).isText(cPCT_ENCODED);
// Test lexer rule SUB_DELIMS
test.run("$format=A/" + cSUB_DELIMS).isInput().isType(UriLexer.FORMAT);
test.run("$format=A/" + cSUB_DELIMS + reserved).isText("$format=A/" + cSUB_DELIMS).isType(UriLexer.FORMAT);
test.run("$format=A/" + cSUB_DELIMS).isAllInput().isType(UriLexer.FORMAT);
test.run("$format=A/" + cSUB_DELIMS + reserved).isType(UriLexer.FORMAT).at(4).isText(cSUB_DELIMS);
// Test lexer rule PCHAR rest
test.run("$format=A/:@").isText("$format=A/:@").isType(UriLexer.FORMAT);
test.run("$format=A/:@" + reserved).isText("$format=A/:@").isType(UriLexer.FORMAT);
test.run("$format=A/:@").isAllText("$format=A/:@").isType(UriLexer.FORMAT);
test.run("$format=A/:@" + reserved).isType(UriLexer.FORMAT).at(4).isText(":@");
// Test lexer rule PCHAR all
test.run("$format=" + cPCHAR + "/" + cPCHAR).isInput().isType(UriLexer.FORMAT);
test.run("$format=" + cPCHAR + "/" + cPCHAR).isAllInput().isType(UriLexer.FORMAT);
test.run("$format=" + cPCHAR + "/" + cPCHAR + reserved)
.isText("$format=" + cPCHAR + "/" + cPCHAR)
.isType(UriLexer.FORMAT);
.isType(UriLexer.FORMAT).at(4)
.isText(cPCHAR);
// Test lexer rule QCHAR_NO_AMP
String amp = "&";
// Test lexer rule UNRESERVED
test.run("$id=" + cUNRESERVED).isInput().isType(UriLexer.ID);
test.run("$id=" + cUNRESERVED + amp).isText("$id=" + cUNRESERVED).isType(UriLexer.ID);
test.run("$id=" + cUNRESERVED).isAllInput().isType(UriLexer.ID);
test.run("$id=" + cUNRESERVED + amp).isType(UriLexer.ID).at(2).isText(cUNRESERVED);
// Test lexer rule PCT_ENCODED
test.run("$id=" + cPCT_ENCODED).isInput().isType(UriLexer.ID);
test.run("$id=" + cPCT_ENCODED + amp).isText("$id=" + cPCT_ENCODED).isType(UriLexer.ID);
test.run("$id=" + cPCT_ENCODED).isAllInput().isType(UriLexer.ID);
test.run("$id=" + cPCT_ENCODED + amp).isType(UriLexer.ID).at(2).isText(cPCT_ENCODED);
// Test lexer rule OTHER_DELIMS
test.run("$id=" + cOTHER_DELIMS).isInput().isType(UriLexer.ID);
test.run("$id=" + cOTHER_DELIMS + amp).isText("$id=" + cOTHER_DELIMS).isType(UriLexer.ID);
test.run("$id=" + cOTHER_DELIMS).isAllInput().isType(UriLexer.ID);
test.run("$id=" + cOTHER_DELIMS + amp).isType(UriLexer.ID).at(2).isText(cOTHER_DELIMS);
// Lexer rule QCHAR_NO_AMP rest
test.run("$id=:@/?$'=").isText("$id=:@/?$'=").isType(UriLexer.ID);
test.run("$id=:@/?$'=" + amp).isText("$id=:@/?$'=").isType(UriLexer.ID);
// Test lexer rule QCHAR_NO_AMP_DQUOTE
test.run("\"" + cQCHAR_NO_AMP_DQUOTE + "\"", true).isInput().isType(UriLexer.SEARCHPHRASE);
test.run("$id=:@/?$'=").isAllText("$id=:@/?$'=").isType(UriLexer.ID);
test.run("$id=:@/?$'=" + amp).isType(UriLexer.ID).at(2).isText(":@/?$'=");
}