Analysis: Add trim token filter that trims whitespaces, closes #1693.

This commit is contained in:
Shay Banon 2012-02-12 13:54:47 +02:00
parent 6b2b797e92
commit a96c2ab11d
4 changed files with 125 additions and 2 deletions

View File

@ -0,0 +1,63 @@
package org.elasticsearch.common.lucene.analysis;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import java.io.IOException;
/**
 * A {@link TokenFilter} that trims leading and trailing whitespace from each token.
 * <p>
 * A character is considered whitespace when its value is {@code <= ' '} (same rule as
 * {@link String#trim()}). When {@code updateOffsets} is {@code true}, the token's start/end
 * offsets are adjusted to account for the characters removed.
 */
// LUCENE MONITOR: Next version of Lucene (4.0) will have this as part of the analyzers module
public final class TrimFilter extends TokenFilter {
// Whether to shift the token's offsets to reflect the trimmed characters.
final boolean updateOffsets;
private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
/**
 * @param in            the upstream token stream to consume
 * @param updateOffsets if {@code true}, adjust start/end offsets for trimmed characters
 */
public TrimFilter(TokenStream in, boolean updateOffsets) {
super(in);
this.updateOffsets = updateOffsets;
}
/**
 * Advances to the next token and trims its term buffer in place.
 *
 * @return {@code false} only when the upstream stream is exhausted; whitespace-only
 *         tokens are emitted as empty terms rather than being dropped
 * @throws IOException if the upstream stream fails
 */
@Override
public boolean incrementToken() throws IOException {
if (!input.incrementToken()) return false;
char[] termBuffer = termAtt.buffer();
int len = termAtt.length();
//TODO: Is this the right behavior or should we return false? Currently, " ", returns true, so I think this should
//also return true
if (len == 0) {
return true;
}
int start = 0;
int end = 0;
int endOff = 0;
// eat the first characters
//QUESTION: Should we use Character.isWhitespace() instead?
// (current rule mirrors String.trim(): any char code <= ' ' counts as whitespace)
for (start = 0; start < len && termBuffer[start] <= ' '; start++) {
}
// eat the end characters
// endOff counts trailing whitespace chars; note that for an all-whitespace token this
// loop stops as soon as end drops below start, so endOff may be smaller than the
// actual run of trailing whitespace — that is why the offset update below guards
// with (start < end).
for (end = len; end >= start && termBuffer[end - 1] <= ' '; end--) {
endOff++;
}
if (start > 0 || end < len) {
if (start < end) {
// interior content remains: keep only termBuffer[start, end)
termAtt.copyBuffer(termBuffer, start, (end - start));
} else {
// token was entirely whitespace: emit an empty term
termAtt.setEmpty();
}
if (updateOffsets) {
// shift the start past the leading whitespace; pull the end back over the
// trailing whitespace, but only when some content survived (see note above)
int newStart = offsetAtt.startOffset() + start;
int newEnd = offsetAtt.endOffset() - (start < end ? endOff : 0);
offsetAtt.setOffset(newStart, newEnd);
}
}
return true;
}
}

View File

@ -406,6 +406,7 @@ public class AnalysisModule extends AbstractModule {
tokenFiltersBindings.processTokenFilter("shingle", ShingleTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("unique", UniqueTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("truncate", TruncateTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("trim", TrimTokenFilterFactory.class);
}
@Override

View File

@ -0,0 +1,48 @@
/*
* Licensed to ElasticSearch and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. ElasticSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.lucene.analysis.TrimFilter;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.settings.IndexSettings;
/**
*
*/
/**
 * Factory for the {@code trim} token filter, which strips leading and trailing
 * whitespace from tokens via {@link TrimFilter}.
 * <p>
 * Honors the {@code update_offsets} setting (default {@code false}): when enabled,
 * the produced filter also adjusts token offsets for the trimmed characters.
 */
public class TrimTokenFilterFactory extends AbstractTokenFilterFactory {

    // Whether the created TrimFilter should also fix up start/end offsets.
    private final boolean updateOffsets;

    @Inject
    public TrimTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
        super(index, indexSettings, name, settings);
        updateOffsets = settings.getAsBoolean("update_offsets", Boolean.FALSE);
    }

    @Override
    public TokenStream create(TokenStream tokenStream) {
        return new TrimFilter(tokenStream, this.updateOffsets);
    }
}

View File

@ -79,6 +79,7 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.analysis.HTMLStripCharFilter;
import org.elasticsearch.common.lucene.analysis.TrimFilter;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
@ -92,8 +93,6 @@ import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_
/**
* A node level registry of analyzers, to be reused by different indices which use default analyzers.
*
*
*/
public class IndicesAnalysisService extends AbstractComponent {
@ -330,6 +329,18 @@ public class IndicesAnalysisService extends AbstractComponent {
}
}));
tokenFilterFactories.put("trim", new PreBuiltTokenFilterFactoryFactory(new TokenFilterFactory() {
@Override
public String name() {
return "trim";
}
@Override
public TokenStream create(TokenStream tokenStream) {
return new TrimFilter(tokenStream, false);
}
}));
tokenFilterFactories.put("reverse", new PreBuiltTokenFilterFactoryFactory(new TokenFilterFactory() {
@Override
public String name() {