Painless: Add an Ingest Script Processor Example (#32302)

This commit adds two pieces. The first is a small set of documentation providing 
instructions on how to get set up to run context examples. This will require a download 
similar to how Kibana works for some of the examples. The second is an ingest processor 
example using the downloaded data. More examples will follow as ideally one per PR. 
This also adds a set of tests to individually test each script as a unit test.
This commit is contained in:
Jack Conradson 2018-08-09 14:24:55 -07:00 committed by GitHub
parent be54ba39c4
commit 293c8a2b24
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 555 additions and 7 deletions

View File

@ -686,6 +686,7 @@
<suppress files="modules[/\\]lang-expression[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]expression[/\\]ExpressionScriptEngine.java" checks="LineLength" /> <suppress files="modules[/\\]lang-expression[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]expression[/\\]ExpressionScriptEngine.java" checks="LineLength" />
<suppress files="modules[/\\]lang-expression[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]expression[/\\]MoreExpressionTests.java" checks="LineLength" /> <suppress files="modules[/\\]lang-expression[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]expression[/\\]MoreExpressionTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-expression[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]expression[/\\]StoredExpressionTests.java" checks="LineLength" /> <suppress files="modules[/\\]lang-expression[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]expression[/\\]StoredExpressionTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-painless[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]painless[/\\]ContextExampleTests.java" checks="LineLength" />
<suppress files="modules[/\\]reindex[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]reindex[/\\]TransportUpdateByQueryAction.java" checks="LineLength" /> <suppress files="modules[/\\]reindex[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]reindex[/\\]TransportUpdateByQueryAction.java" checks="LineLength" />
<suppress files="plugins[/\\]analysis-icu[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]IcuCollationTokenFilterFactory.java" checks="LineLength" /> <suppress files="plugins[/\\]analysis-icu[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]IcuCollationTokenFilterFactory.java" checks="LineLength" />
<suppress files="plugins[/\\]analysis-icu[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]IcuFoldingTokenFilterFactory.java" checks="LineLength" /> <suppress files="plugins[/\\]analysis-icu[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]IcuFoldingTokenFilterFactory.java" checks="LineLength" />

View File

@ -14,6 +14,8 @@ specialized code may define new ways to use a Painless script.
|==== |====
| Name | Painless Documentation | Name | Painless Documentation
| Elasticsearch Documentation | Elasticsearch Documentation
| Ingest processor | <<painless-ingest-processor-context, Painless Documentation>>
| {ref}/script-processor.html[Elasticsearch Documentation]
| Update | <<painless-update-context, Painless Documentation>> | Update | <<painless-update-context, Painless Documentation>>
| {ref}/docs-update.html[Elasticsearch Documentation] | {ref}/docs-update.html[Elasticsearch Documentation]
| Update by query | <<painless-update-by-query-context, Painless Documentation>> | Update by query | <<painless-update-by-query-context, Painless Documentation>>
@ -44,12 +46,12 @@ specialized code may define new ways to use a Painless script.
| {ref}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation] | {ref}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation]
| Bucket aggregation | <<painless-bucket-agg-context, Painless Documentation>> | Bucket aggregation | <<painless-bucket-agg-context, Painless Documentation>>
| {ref}/search-aggregations-pipeline-bucket-script-aggregation.html[Elasticsearch Documentation] | {ref}/search-aggregations-pipeline-bucket-script-aggregation.html[Elasticsearch Documentation]
| Ingest processor | <<painless-ingest-processor-context, Painless Documentation>>
| {ref}/script-processor.html[Elasticsearch Documentation]
| Watcher condition | <<painless-watcher-condition-context, Painless Documentation>> | Watcher condition | <<painless-watcher-condition-context, Painless Documentation>>
| {xpack-ref}/condition-script.html[Elasticsearch Documentation] | {xpack-ref}/condition-script.html[Elasticsearch Documentation]
| Watcher transform | <<painless-watcher-transform-context, Painless Documentation>> | Watcher transform | <<painless-watcher-transform-context, Painless Documentation>>
| {xpack-ref}/transform-script.html[Elasticsearch Documentation] | {xpack-ref}/transform-script.html[Elasticsearch Documentation]
|==== |====
include::painless-contexts/painless-context-examples.asciidoc[]
include::painless-contexts/index.asciidoc[] include::painless-contexts/index.asciidoc[]

View File

@ -1,3 +1,5 @@
include::painless-ingest-processor-context.asciidoc[]
include::painless-update-context.asciidoc[] include::painless-update-context.asciidoc[]
include::painless-update-by-query-context.asciidoc[] include::painless-update-by-query-context.asciidoc[]
@ -28,8 +30,6 @@ include::painless-metric-agg-reduce-context.asciidoc[]
include::painless-bucket-agg-context.asciidoc[] include::painless-bucket-agg-context.asciidoc[]
include::painless-ingest-processor-context.asciidoc[]
include::painless-watcher-condition-context.asciidoc[] include::painless-watcher-condition-context.asciidoc[]
include::painless-watcher-transform-context.asciidoc[] include::painless-watcher-transform-context.asciidoc[]

View File

@ -0,0 +1,80 @@
[[painless-context-examples]]
=== Context examples
To run the examples, index the sample seat data into Elasticsearch. The examples
must be run sequentially to work correctly.
. Download the
https://download.elastic.co/demos/painless/contexts/seats.json[seat data]. This
data set contains booking information for a collection of plays. Each document
represents a single seat for a play at a particular theater on a specific date
and time.
+
Each document contains the following fields:
+
`theatre` ({ref}/keyword.html[`keyword`])::
The name of the theater the play is in.
`play` ({ref}/text.html[`text`])::
The name of the play.
`actors` ({ref}/text.html[`text`])::
A list of actors in the play.
`row` ({ref}/number.html[`integer`])::
The row of the seat.
`number` ({ref}/number.html[`integer`])::
The number of the seat within a row.
`cost` ({ref}/number.html[`double`])::
The cost of the ticket for the seat.
`sold` ({ref}/boolean.html[`boolean`])::
Whether or not the seat is sold.
`datetime` ({ref}/date.html[`date`])::
The date and time of the play as a date object.
`date` ({ref}/keyword.html[`keyword`])::
The date of the play as a keyword.
`time` ({ref}/keyword.html[`keyword`])::
The time of the play as a keyword.
. {defguide}/running-elasticsearch.html[Start] Elasticsearch. Note these
examples assume Elasticsearch and Kibana are running locally. To use the Console
editor with a remote Kibana instance, click the settings icon and enter the
Console URL. To submit a cURL request to a remote Elasticsearch instance, edit
the request URL.
. Create {ref}/mapping.html[mappings] for the sample data:
+
[source,js]
----
PUT /seats
{
"mappings": {
"seat": {
"properties": {
"theatre": { "type": "keyword" },
"play": { "type": "text" },
"actors": { "type": "text" },
"row": { "type": "integer" },
"number": { "type": "integer" },
"cost": { "type": "double" },
"sold": { "type": "boolean" },
"datetime": { "type": "date" },
"date": { "type": "keyword" },
"time": { "type": "keyword" }
}
}
}
}
----
+
// CONSOLE
. Run the <<painless-ingest-processor-context, ingest processor context>>
example. This sets up a script ingest processor used on each document as the
seat data is indexed.
. Index the seat data:
+
[source,js]
----
curl -XPOST localhost:9200/seats/seat/_bulk?pipeline=seats -H "Content-Type: application/x-ndjson" --data-binary "@/<local-file-path>/seats.json"
----
// NOTCONSOLE

View File

@ -27,7 +27,7 @@ to modify documents upon insertion.
{ref}/mapping-type-field.html[`ctx['_type']`]:: {ref}/mapping-type-field.html[`ctx['_type']`]::
Modify this to change the type for the current document. Modify this to change the type for the current document.
`ctx` (`Map`, read-only):: `ctx` (`Map`)::
Modify the values in the `Map/List` structure to add, modify, or delete Modify the values in the `Map/List` structure to add, modify, or delete
the fields of a document. the fields of a document.
@ -39,3 +39,157 @@ void::
*API* *API*
The standard <<painless-api-reference, Painless API>> is available. The standard <<painless-api-reference, Painless API>> is available.
*Example*
To run this example, first follow the steps in
<<painless-context-examples, context examples>>.
The seat data contains:
* A date in the format `YYYY-MM-DD` where the second digit of both month and day
is optional.
* A time in the format `HH:MM*` where the second digit of both hours and minutes
is optional. The star (*) represents either the `String` `AM` or `PM`.
The following ingest script processes the date and time `Strings` and stores the
result in a `datetime` field.
[source,Painless]
----
String[] split(String s, char d) { <1>
int count = 0;
for (char c : s.toCharArray()) { <2>
if (c == d) {
++count;
}
}
if (count == 0) {
return new String[] {s}; <3>
}
String[] r = new String[count + 1]; <4>
int i0 = 0, i1 = 0;
count = 0;
for (char c : s.toCharArray()) { <5>
if (c == d) {
r[count++] = s.substring(i0, i1);
i0 = i1 + 1;
}
++i1;
}
r[count] = s.substring(i0, i1); <6>
return r;
}
String[] dateSplit = split(ctx.date, (char)"-"); <7>
String year = dateSplit[0].trim();
String month = dateSplit[1].trim();
if (month.length() == 1) { <8>
month = "0" + month;
}
String day = dateSplit[2].trim();
if (day.length() == 1) { <9>
day = "0" + day;
}
boolean pm = ctx.time.substring(ctx.time.length() - 2).equals("PM"); <10>
String[] timeSplit = split(
ctx.time.substring(0, ctx.time.length() - 2), (char)":"); <11>
int hours = Integer.parseInt(timeSplit[0].trim());
int minutes = Integer.parseInt(timeSplit[1].trim());
if (pm) { <12>
hours += 12;
}
String dts = year + "-" + month + "-" + day + "T" +
(hours < 10 ? "0" + hours : "" + hours) + ":" +
(minutes < 10 ? "0" + minutes : "" + minutes) +
":00+08:00"; <13>
ZonedDateTime dt = ZonedDateTime.parse(
dts, DateTimeFormatter.ISO_OFFSET_DATE_TIME); <14>
ctx.datetime = dt.getLong(ChronoField.INSTANT_SECONDS)*1000L; <15>
----
<1> Creates a `split` <<painless-functions, function>> to split a
<<string-type, `String`>> type value using a <<primitive-types, `char`>>
type value as the delimiter. This is useful for handling the necessity of
pulling out the individual pieces of the date and time `Strings` from the
original seat data.
<2> The first pass through each `char` in the `String` collects how many new
`Strings` the original is split into.
<3> Returns the original `String` if there are no instances of the delimiting
`char`.
<4> Creates an <<array-type, array type>> value to collect the split `Strings`
into based on the number of `char` delimiters found in the first pass.
<5> The second pass through each `char` in the `String` collects each split
substring into an array type value of `Strings`.
<6> Collects the last substring into the array type value of `Strings`.
<7> Uses the `split` function to separate the date `String` from the seat data
into year, month, and day `Strings`.
Note::
* The use of a `String` type value to `char` type value
<<string-character-casting, cast>> as part of the second argument since
character literals do not exist.
* The use of the `ctx` ingest processor context variable to retrieve the
data from the `date` field.
<8> Appends the <<string-literals, string literal>> `"0"` value to a single
digit month since the format of the seat data allows for this case.
<9> Appends the <<string-literals, string literal>> `"0"` value to a single
digit day since the format of the seat data allows for this case.
<10> Sets the <<primitive-types, `boolean type`>>
<<painless-variables, variable>> to `true` if the time `String` is a time
in the afternoon or evening.
Note::
* The use of the `ctx` ingest processor context variable to retrieve the
data from the `time` field.
<11> Uses the `split` function to separate the time `String` from the seat data
into hours and minutes `Strings`.
Note::
* The use of the `substring` method to remove the `AM` or `PM` portion of
the time `String`.
* The use of a `String` type value to `char` type value
<<string-character-casting, cast>> as part of the second argument since
character literals do not exist.
* The use of the `ctx` ingest processor context variable to retrieve the
data from the `time` field.
<12> If the time `String` is an afternoon or evening value adds the
<<integer-literals, integer literal>> `12` to the existing hours to move to
a 24-hour based time.
<13> Builds a new time `String` that is parsable using existing API methods.
<14> Creates a `ZonedDateTime` <<reference-types, reference type>> value by using
the API method `parse` to parse the new time `String`.
<15> Sets the datetime field `datetime` to the number of milliseconds retrieved
from the API method `getLong`.
Note::
* The use of the `ctx` ingest processor context variable to set the field
`datetime`. Manipulate each document's fields with the `ctx` variable as
each document is indexed.
Submit the following request:
[source,js]
----
PUT /_ingest/pipeline/seats
{
"description": "update datetime for seats",
"processors": [
{
"script": {
"source": "String[] split(String s, char d) { int count = 0; for (char c : s.toCharArray()) { if (c == d) { ++count; } } if (count == 0) { return new String[] {s}; } String[] r = new String[count + 1]; int i0 = 0, i1 = 0; count = 0; for (char c : s.toCharArray()) { if (c == d) { r[count++] = s.substring(i0, i1); i0 = i1 + 1; } ++i1; } r[count] = s.substring(i0, i1); return r; } String[] dateSplit = split(ctx.date, (char)\"-\"); String year = dateSplit[0].trim(); String month = dateSplit[1].trim(); if (month.length() == 1) { month = \"0\" + month; } String day = dateSplit[2].trim(); if (day.length() == 1) { day = \"0\" + day; } boolean pm = ctx.time.substring(ctx.time.length() - 2).equals(\"PM\"); String[] timeSplit = split(ctx.time.substring(0, ctx.time.length() - 2), (char)\":\"); int hours = Integer.parseInt(timeSplit[0].trim()); int minutes = Integer.parseInt(timeSplit[1].trim()); if (pm) { hours += 12; } String dts = year + \"-\" + month + \"-\" + day + \"T\" + (hours < 10 ? \"0\" + hours : \"\" + hours) + \":\" + (minutes < 10 ? \"0\" + minutes : \"\" + minutes) + \":00+08:00\"; ZonedDateTime dt = ZonedDateTime.parse(dts, DateTimeFormatter.ISO_OFFSET_DATE_TIME); ctx.datetime = dt.getLong(ChronoField.INSTANT_SECONDS)*1000L;"
}
}
]
}
----
// CONSOLE

View File

@ -5,7 +5,7 @@ Keywords are reserved tokens for built-in language features.
*Errors* *Errors*
If a keyword is used as an <<painless-identifiers, identifier>>. * If a keyword is used as an <<painless-identifiers, identifier>>.
*Keywords* *Keywords*

View File

@ -0,0 +1,311 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.painless;
/**
* These tests run the Painless scripts used in the context docs against
* slightly modified data designed around unit tests rather than a fully-
* running Elasticsearch server.
*/
public class ContextExampleTests extends ScriptTestCase {

    // **** Docs Generator Code ****

    /*

    import java.io.FileWriter;
    import java.io.IOException;

    public class Generator {

        public final static String[] theatres = new String[] {"Down Port", "Graye", "Skyline", "Courtyard"};
        public final static String[] plays = new String[] {"Driving", "Pick It Up", "Sway and Pull", "Harriot",
                "The Busline", "Ants Underground", "Exploria", "Line and Single", "Shafted", "Sunnyside Down",
                "Test Run", "Auntie Jo"};
        public final static String[] actors = new String[] {"James Holland", "Krissy Smith", "Joe Muir", "Ryan Earns",
                "Joel Madigan", "Jessica Brown", "Baz Knight", "Jo Hangum", "Rachel Grass", "Phoebe Miller", "Sarah Notch",
                "Brayden Green", "Joshua Iller", "Jon Hittle", "Rob Kettleman", "Laura Conrad", "Simon Hower", "Nora Blue",
                "Mike Candlestick", "Jacey Bell"};

        public static void writeSeat(FileWriter writer, int id, String theatre, String play, String[] actors,
                String date, String time, int row, int number, double cost, boolean sold) throws IOException {
            StringBuilder builder = new StringBuilder();
            builder.append("{ \"create\" : { \"_index\" : \"seats\", \"_type\" : \"seat\", \"_id\" : \"");
            builder.append(id);
            builder.append("\" } }\n");
            builder.append("{ \"theatre\" : \"");
            builder.append(theatre);
            builder.append("\", \"play\" : \"");
            builder.append(play);
            builder.append("\", \"actors\": [ \"");
            for (String actor : actors) {
                builder.append(actor);
                if (actor.equals(actors[actors.length - 1]) == false) {
                    builder.append("\", \"");
                }
            }
            builder.append("\" ], \"date\": \"");
            builder.append(date);
            builder.append("\", \"time\": \"");
            builder.append(time);
            builder.append("\", \"row\": ");
            builder.append(row);
            builder.append(", \"number\": ");
            builder.append(number);
            builder.append(", \"cost\": ");
            builder.append(cost);
            builder.append(", \"sold\": ");
            builder.append(sold ? "true" : "false");
            builder.append(" }\n");
            writer.write(builder.toString());
        }

        public static void main(String args[]) throws IOException {
            FileWriter writer = new FileWriter("/home/jdconrad/test/seats.json");
            int id = 0;

            for (int playCount = 0; playCount < 12; ++playCount) {
                String play = plays[playCount];
                String theatre;
                String[] actor;
                int startMonth;
                int endMonth;
                String time;

                if (playCount == 0) {
                    theatre = theatres[0];
                    actor = new String[] {actors[0], actors[1], actors[2], actors[3]};
                    startMonth = 4;
                    endMonth = 5;
                    time = "3:00PM";
                } else if (playCount == 1) {
                    theatre = theatres[0];
                    actor = new String[] {actors[4], actors[5], actors[6], actors[7], actors[8], actors[9]};
                    startMonth = 4;
                    endMonth = 6;
                    time = "8:00PM";
                } else if (playCount == 2) {
                    theatre = theatres[0];
                    actor = new String[] {actors[0], actors[1], actors[2], actors[3],
                            actors[4], actors[5], actors[6], actors[7]};
                    startMonth = 6;
                    endMonth = 8;
                    time = "3:00 PM";
                } else if (playCount == 3) {
                    theatre = theatres[0];
                    actor = new String[] {actors[9], actors[10], actors[11], actors[12], actors[13], actors[14],
                            actors[15], actors[16], actors[17], actors[18], actors[19]};
                    startMonth = 7;
                    endMonth = 8;
                    time = "8:00PM";
                } else if (playCount == 4) {
                    theatre = theatres[0];
                    actor = new String[] {actors[13], actors[14], actors[15], actors[17], actors[18], actors[19]};
                    startMonth = 8;
                    endMonth = 10;
                    time = "3:00PM";
                } else if (playCount == 5) {
                    theatre = theatres[0];
                    actor = new String[] {actors[8], actors[9], actors[10], actors[11], actors[12]};
                    startMonth = 8;
                    endMonth = 10;
                    time = "8:00PM";
                } else if (playCount == 6) {
                    theatre = theatres[1];
                    actor = new String[] {actors[10], actors[11], actors[12], actors[13], actors[14], actors[15], actors[16]};
                    startMonth = 4;
                    endMonth = 5;
                    time = "11:00AM";
                } else if (playCount == 7) {
                    theatre = theatres[1];
                    actor = new String[] {actors[17], actors[18]};
                    startMonth = 6;
                    endMonth = 9;
                    time = "2:00PM";
                } else if (playCount == 8) {
                    theatre = theatres[1];
                    actor = new String[] {actors[0], actors[1], actors[2], actors[3], actors[16]};
                    startMonth = 10;
                    endMonth = 11;
                    time = "11:00AM";
                } else if (playCount == 9) {
                    theatre = theatres[2];
                    actor = new String[] {actors[1], actors[2], actors[3], actors[17], actors[18], actors[19]};
                    startMonth = 3;
                    endMonth = 6;
                    time = "4:00PM";
                } else if (playCount == 10) {
                    theatre = theatres[2];
                    actor = new String[] {actors[2], actors[3], actors[4], actors[5]};
                    startMonth = 7;
                    endMonth = 8;
                    time = "7:30PM";
                } else if (playCount == 11) {
                    theatre = theatres[2];
                    actor = new String[] {actors[7], actors[13], actors[14], actors[15], actors[16], actors[17]};
                    startMonth = 9;
                    endMonth = 12;
                    time = "5:40PM";
                } else {
                    throw new RuntimeException("too many plays");
                }

                int rows;
                int number;

                if (playCount < 6) {
                    rows = 3;
                    number = 12;
                } else if (playCount < 9) {
                    rows = 5;
                    number = 9;
                } else if (playCount < 12) {
                    rows = 11;
                    number = 15;
                } else {
                    throw new RuntimeException("too many seats");
                }

                for (int month = startMonth; month <= endMonth; ++month) {
                    for (int day = 1; day <= 14; ++day) {
                        for (int row = 1; row <= rows; ++row) {
                            for (int count = 1; count <= number; ++count) {
                                String date = "2018-" + month + "-" + day;
                                double cost = (25 - row) * 1.25;

                                writeSeat(writer, ++id, theatre, play, actor, date, time, row, count, cost, false);
                            }
                        }
                    }
                }
            }

            writer.write("\n");
            writer.close();
        }
    }

    */

    // **** Initial Mappings ****

    /*

    curl -X PUT "localhost:9200/seats" -H 'Content-Type: application/json' -d'
    {
      "mappings": {
        "seat": {
          "properties": {
            "theatre":  { "type": "keyword" },
            "play":     { "type": "text"    },
            "actors":   { "type": "text"    },
            "row":      { "type": "integer" },
            "number":   { "type": "integer" },
            "cost":     { "type": "double"  },
            "sold":     { "type": "boolean" },
            "datetime": { "type": "date"    },
            "date":     { "type": "keyword" },
            "time":     { "type": "keyword" }
          }
        }
      }
    }
    '

    */

    // Create Ingest to Modify Dates:

    /*

    curl -X PUT "localhost:9200/_ingest/pipeline/seats" -H 'Content-Type: application/json' -d'
    {
        "description": "update datetime for seats",
        "processors": [
          {
            "script": {
              "source": "String[] split(String s, char d) { int count = 0; for (char c : s.toCharArray()) { if (c == d) { ++count; } } if (count == 0) { return new String[] {s}; } String[] r = new String[count + 1]; int i0 = 0, i1 = 0; count = 0; for (char c : s.toCharArray()) { if (c == d) { r[count++] = s.substring(i0, i1); i0 = i1 + 1; } ++i1; } r[count] = s.substring(i0, i1); return r; } String[] dateSplit = split(ctx.date, (char)\"-\"); String year = dateSplit[0].trim(); String month = dateSplit[1].trim(); if (month.length() == 1) { month = \"0\" + month; } String day = dateSplit[2].trim(); if (day.length() == 1) { day = \"0\" + day; } boolean pm = ctx.time.substring(ctx.time.length() - 2).equals(\"PM\"); String[] timeSplit = split(ctx.time.substring(0, ctx.time.length() - 2), (char)\":\"); int hours = Integer.parseInt(timeSplit[0].trim()); int minutes = Integer.parseInt(timeSplit[1].trim()); if (pm) { hours += 12; } String dts = year + \"-\" + month + \"-\" + day + \"T\" + (hours < 10 ? \"0\" + hours : \"\" + hours) + \":\" + (minutes < 10 ? \"0\" + minutes : \"\" + minutes) + \":00+08:00\"; ZonedDateTime dt = ZonedDateTime.parse(dts, DateTimeFormatter.ISO_OFFSET_DATE_TIME); ctx.datetime = dt.getLong(ChronoField.INSTANT_SECONDS)*1000L;"
            }
          }
        ]
    }
    '

    */

    /**
     * Runs the same Painless script as the documented "seats" ingest pipeline
     * (see the comment above) against an inline map standing in for {@code ctx}.
     * The script splits the seat's {@code date} and {@code time} strings,
     * zero-pads single-digit components, builds an ISO-8601 datetime with a
     * fixed {@code +08:00} offset, and returns epoch milliseconds.
     *
     * Expected value: 2018-09-01T15:00:00+08:00 == 1535785200000 ms.
     */
    public void testIngestProcessorScript() {
        assertEquals(1535785200000L,
            exec("String[] split(String s, char d) {" +
                 "    int count = 0;" +
                 "    for (char c : s.toCharArray()) {" +
                 "        if (c == d) {" +
                 "            ++count;" +
                 "        }" +
                 "    }" +
                 "    if (count == 0) {" +
                 "        return new String[] {s};" +
                 "    }" +
                 "    String[] r = new String[count + 1];" +
                 "    int i0 = 0, i1 = 0;" +
                 "    count = 0;" +
                 "    for (char c : s.toCharArray()) {" +
                 "        if (c == d) {" +
                 "            r[count++] = s.substring(i0, i1);" +
                 "            i0 = i1 + 1;" +
                 "        }" +
                 "        ++i1;" +
                 "    }" +
                 "    r[count] = s.substring(i0, i1);" +
                 "    return r;" +
                 "}" +
                 "def x = ['date': '2018-9-1', 'time': '3:00 PM'];" +
                 "String[] dateSplit = split(x.date, (char)'-');" +
                 "String year = dateSplit[0].trim();" +
                 "String month = dateSplit[1].trim();" +
                 "if (month.length() == 1) {" +
                 "    month = '0' + month;" +
                 "}" +
                 "String day = dateSplit[2].trim();" +
                 "if (day.length() == 1) {" +
                 "    day = '0' + day;" +
                 "}" +
                 "boolean pm = x.time.substring(x.time.length() - 2).equals('PM');" +
                 "String[] timeSplit = split(x.time.substring(0, x.time.length() - 2), (char)':');" +
                 "int hours = Integer.parseInt(timeSplit[0].trim());" +
                 // Parse and zero-pad minutes exactly as the documented pipeline
                 // script does; keeping minutes as a raw String would break on
                 // single-digit minute values (e.g. "3:5PM" -> unparsable ISO).
                 "int minutes = Integer.parseInt(timeSplit[1].trim());" +
                 "if (pm) {" +
                 "    hours += 12;" +
                 "}" +
                 "String dts = year + '-' + month + '-' + day + 'T' + " +
                 "(hours < 10 ? '0' + hours : '' + hours) + ':' + " +
                 "(minutes < 10 ? '0' + minutes : '' + minutes) + ':00+08:00';" +
                 "ZonedDateTime dt = ZonedDateTime.parse(dts, DateTimeFormatter.ISO_OFFSET_DATE_TIME);" +
                 "return dt.getLong(ChronoField.INSTANT_SECONDS) * 1000L"
            )
        );
    }

    // Post Generated Data:

    /*

    curl -XPOST localhost:9200/seats/seat/_bulk?pipeline=seats -H "Content-Type: application/x-ndjson" --data-binary "@/home/jdconrad/test/seats.json"

    */
}