Merge branch 'master' into feature/query-refactoring

Conflicts:
	core/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java
commit 4406d236de
Christoph Büscher, 2015-06-30 10:52:34 +02:00
71 changed files with 2155 additions and 1781 deletions


@ -0,0 +1 @@
233098147123ee5ddcd39ffc57ff648be4b7e5b2

File diff suppressed because it is too large.


@ -0,0 +1 @@

pom.xml

@ -220,6 +220,14 @@
<optional>true</optional>
</dependency>
+<!-- remove this for java 8 -->
+<dependency>
+<groupId>com.twitter</groupId>
+<artifactId>jsr166e</artifactId>
+<version>1.1.0</version>
+</dependency>
<!-- We don't use this since the publish pom is then messed up -->
<!--
<dependency>
@ -409,7 +417,7 @@
<shadedPattern>org.elasticsearch.common.hppc</shadedPattern>
</relocation>
<relocation>
-<pattern>jsr166e</pattern>
+<pattern>com.twitter.jsr166e</pattern>
<shadedPattern>org.elasticsearch.common.util.concurrent.jsr166e</shadedPattern>
</relocation>
<relocation>
@ -1058,7 +1066,6 @@
<artifactId>jacoco-maven-plugin</artifactId>
<configuration>
<excludes>
-<exclude>jsr166e/**</exclude>
<exclude>org/apache/lucene/**</exclude>
</excludes>
</configuration>
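The net effect of the relocation above: source code keeps importing the artifact's real package, and the shade plugin rewrites those references inside the shaded jar. A minimal sketch (the holder class is hypothetical):

// Source imports the artifact's real package:
import com.twitter.jsr166e.LongAdder;

// Hypothetical holder class; in the shaded jar, the relocation above rewrites
// the reference to org.elasticsearch.common.util.concurrent.jsr166e.LongAdder.
class RequestCounter {
    final LongAdder count = new LongAdder();
}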

(Maven assembly descriptor: bundled dependency includes)

@ -25,6 +25,7 @@
<include>com.tdunning:t-digest</include>
<include>org.apache.commons:commons-lang3</include>
<include>commons-cli:commons-cli</include>
+<include>com.twitter:jsr166e</include>
</includes>
</dependencySet>
<dependencySet>

jsr166e/DoubleAdder.java (deleted)

@ -1,198 +0,0 @@
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
* http://creativecommons.org/publicdomain/zero/1.0/
*/
package jsr166e;
import java.io.Serializable;
/**
* One or more variables that together maintain an initially zero
* {@code double} sum. When updates (method {@link #add}) are
* contended across threads, the set of variables may grow dynamically
* to reduce contention. Method {@link #sum} (or, equivalently {@link
* #doubleValue}) returns the current total combined across the
* variables maintaining the sum.
*
* <p>This class extends {@link Number}, but does <em>not</em> define
* methods such as {@code equals}, {@code hashCode} and {@code
* compareTo} because instances are expected to be mutated, and so are
* not useful as collection keys.
*
* <p><em>jsr166e note: This class is targeted to be placed in
* java.util.concurrent.atomic.</em>
*
* @since 1.8
* @author Doug Lea
*/
public class DoubleAdder extends Striped64 implements Serializable {
private static final long serialVersionUID = 7249069246863182397L;
/**
* Update function. Note that we must use "long" for underlying
* representations, because there is no compareAndSet for double,
* due to the fact that the bitwise equals used in any CAS
* implementation is not the same as double-precision equals.
* However, we use CAS only to detect and alleviate contention,
* for which bitwise equals works best anyway. In principle, the
* long/double conversions used here should be essentially free on
* most platforms since they just re-interpret bits.
*
* Similar conversions are used in other methods.
*/
final long fn(long v, long x) {
return Double.doubleToRawLongBits
(Double.longBitsToDouble(v) +
Double.longBitsToDouble(x));
}
/**
* Creates a new adder with initial sum of zero.
*/
public DoubleAdder() {
}
/**
* Adds the given value.
*
* @param x the value to add
*/
public void add(double x) {
Cell[] as; long b, v; HashCode hc; Cell a; int n;
if ((as = cells) != null ||
!casBase(b = base,
Double.doubleToRawLongBits
(Double.longBitsToDouble(b) + x))) {
boolean uncontended = true;
int h = (hc = threadHashCode.get()).code;
if (as == null || (n = as.length) < 1 ||
(a = as[(n - 1) & h]) == null ||
!(uncontended = a.cas(v = a.value,
Double.doubleToRawLongBits
(Double.longBitsToDouble(v) + x))))
retryUpdate(Double.doubleToRawLongBits(x), hc, uncontended);
}
}
/**
* Returns the current sum. The returned value is <em>NOT</em> an
* atomic snapshot; invocation in the absence of concurrent
* updates returns an accurate result, but concurrent updates that
* occur while the sum is being calculated might not be
* incorporated. Also, because floating-point arithmetic is not
* strictly associative, the returned result need not be identical
* to the value that would be obtained in a sequential series of
* updates to a single variable.
*
* @return the sum
*/
public double sum() {
Cell[] as = cells;
double sum = Double.longBitsToDouble(base);
if (as != null) {
int n = as.length;
for (int i = 0; i < n; ++i) {
Cell a = as[i];
if (a != null)
sum += Double.longBitsToDouble(a.value);
}
}
return sum;
}
/**
* Resets variables maintaining the sum to zero. This method may
* be a useful alternative to creating a new adder, but is only
* effective if there are no concurrent updates. Because this
* method is intrinsically racy, it should only be used when it is
* known that no threads are concurrently updating.
*/
public void reset() {
internalReset(0L);
}
/**
* Equivalent in effect to {@link #sum} followed by {@link
* #reset}. This method may apply for example during quiescent
* points between multithreaded computations. If there are
* updates concurrent with this method, the returned value is
* <em>not</em> guaranteed to be the final value occurring before
* the reset.
*
* @return the sum
*/
public double sumThenReset() {
Cell[] as = cells;
double sum = Double.longBitsToDouble(base);
base = 0L;
if (as != null) {
int n = as.length;
for (int i = 0; i < n; ++i) {
Cell a = as[i];
if (a != null) {
long v = a.value;
a.value = 0L;
sum += Double.longBitsToDouble(v);
}
}
}
return sum;
}
/**
* Returns the String representation of the {@link #sum}.
* @return the String representation of the {@link #sum}
*/
public String toString() {
return Double.toString(sum());
}
/**
* Equivalent to {@link #sum}.
*
* @return the sum
*/
public double doubleValue() {
return sum();
}
/**
* Returns the {@link #sum} as a {@code long} after a
* narrowing primitive conversion.
*/
public long longValue() {
return (long)sum();
}
/**
* Returns the {@link #sum} as an {@code int} after a
* narrowing primitive conversion.
*/
public int intValue() {
return (int)sum();
}
/**
* Returns the {@link #sum} as a {@code float}
* after a narrowing primitive conversion.
*/
public float floatValue() {
return (float)sum();
}
private void writeObject(java.io.ObjectOutputStream s)
throws java.io.IOException {
s.defaultWriteObject();
s.writeDouble(sum());
}
private void readObject(java.io.ObjectInputStream s)
throws java.io.IOException, ClassNotFoundException {
s.defaultReadObject();
busy = 0;
cells = null;
base = Double.doubleToRawLongBits(s.readDouble());
}
}
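Although this commit drops the vendored source above in favor of the com.twitter:jsr166e artifact, the API stays the same. A minimal usage sketch of the adder under write-heavy concurrency (class name and values are illustrative):

import com.twitter.jsr166e.DoubleAdder;

public class DoubleAdderDemo {
    public static void main(String[] args) throws InterruptedException {
        final DoubleAdder total = new DoubleAdder();
        Thread[] writers = new Thread[4];
        for (int i = 0; i < writers.length; i++) {
            writers[i] = new Thread(new Runnable() {
                public void run() {
                    for (int j = 0; j < 1000; j++) {
                        total.add(0.5); // contended adds stripe across cells
                    }
                }
            });
            writers[i].start();
        }
        for (Thread t : writers) {
            t.join();
        }
        System.out.println(total.sum()); // 2000.0 once all writers have joined
    }
}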

jsr166e/DoubleMaxUpdater.java (deleted)

@ -1,193 +0,0 @@
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
* http://creativecommons.org/publicdomain/zero/1.0/
*/
package jsr166e;
import java.io.Serializable;
/**
* One or more variables that together maintain a running {@code double}
* maximum with initial value {@code Double.NEGATIVE_INFINITY}. When
* updates (method {@link #update}) are contended across threads, the
* set of variables may grow dynamically to reduce contention. Method
* {@link #max} (or, equivalently, {@link #doubleValue}) returns the
* current maximum across the variables maintaining updates.
*
* <p>This class extends {@link Number}, but does <em>not</em> define
* methods such as {@code equals}, {@code hashCode} and {@code
* compareTo} because instances are expected to be mutated, and so are
* not useful as collection keys.
*
* <p><em>jsr166e note: This class is targeted to be placed in
* java.util.concurrent.atomic.</em>
*
* @since 1.8
* @author Doug Lea
*/
public class DoubleMaxUpdater extends Striped64 implements Serializable {
private static final long serialVersionUID = 7249069246863182397L;
/**
* Long representation of negative infinity. See class Double
* internal documentation for explanation.
*/
private static final long MIN_AS_LONG = 0xfff0000000000000L;
/**
* Update function. See class DoubleAdder for rationale
* for using conversions from/to long.
*/
final long fn(long v, long x) {
return Double.longBitsToDouble(v) > Double.longBitsToDouble(x) ? v : x;
}
/**
* Creates a new instance with initial value of {@code
* Double.NEGATIVE_INFINITY}.
*/
public DoubleMaxUpdater() {
base = MIN_AS_LONG;
}
/**
* Updates the maximum to be at least the given value.
*
* @param x the value to update
*/
public void update(double x) {
long lx = Double.doubleToRawLongBits(x);
Cell[] as; long b, v; HashCode hc; Cell a; int n;
if ((as = cells) != null ||
(Double.longBitsToDouble(b = base) < x && !casBase(b, lx))) {
boolean uncontended = true;
int h = (hc = threadHashCode.get()).code;
if (as == null || (n = as.length) < 1 ||
(a = as[(n - 1) & h]) == null ||
(Double.longBitsToDouble(v = a.value) < x &&
!(uncontended = a.cas(v, lx))))
retryUpdate(lx, hc, uncontended);
}
}
/**
* Returns the current maximum. The returned value is
* <em>NOT</em> an atomic snapshot; invocation in the absence of
* concurrent updates returns an accurate result, but concurrent
* updates that occur while the value is being calculated might
* not be incorporated.
*
* @return the maximum
*/
public double max() {
Cell[] as = cells;
double max = Double.longBitsToDouble(base);
if (as != null) {
int n = as.length;
double v;
for (int i = 0; i < n; ++i) {
Cell a = as[i];
if (a != null && (v = Double.longBitsToDouble(a.value)) > max)
max = v;
}
}
return max;
}
/**
* Resets variables maintaining updates to {@code
* Double.NEGATIVE_INFINITY}. This method may be a useful
* alternative to creating a new updater, but is only effective if
* there are no concurrent updates. Because this method is
* intrinsically racy, it should only be used when it is known
* that no threads are concurrently updating.
*/
public void reset() {
internalReset(MIN_AS_LONG);
}
/**
* Equivalent in effect to {@link #max} followed by {@link
* #reset}. This method may apply for example during quiescent
* points between multithreaded computations. If there are
* updates concurrent with this method, the returned value is
* <em>not</em> guaranteed to be the final value occurring before
* the reset.
*
* @return the maximum
*/
public double maxThenReset() {
Cell[] as = cells;
double max = Double.longBitsToDouble(base);
base = MIN_AS_LONG;
if (as != null) {
int n = as.length;
for (int i = 0; i < n; ++i) {
Cell a = as[i];
if (a != null) {
double v = Double.longBitsToDouble(a.value);
a.value = MIN_AS_LONG;
if (v > max)
max = v;
}
}
}
return max;
}
/**
* Returns the String representation of the {@link #max}.
* @return the String representation of the {@link #max}
*/
public String toString() {
return Double.toString(max());
}
/**
* Equivalent to {@link #max}.
*
* @return the max
*/
public double doubleValue() {
return max();
}
/**
* Returns the {@link #max} as a {@code long} after a
* narrowing primitive conversion.
*/
public long longValue() {
return (long)max();
}
/**
* Returns the {@link #max} as an {@code int} after a
* narrowing primitive conversion.
*/
public int intValue() {
return (int)max();
}
/**
* Returns the {@link #max} as a {@code float}
* after a narrowing primitive conversion.
*/
public float floatValue() {
return (float)max();
}
private void writeObject(java.io.ObjectOutputStream s)
throws java.io.IOException {
s.defaultWriteObject();
s.writeDouble(max());
}
private void readObject(java.io.ObjectInputStream s)
throws java.io.IOException, ClassNotFoundException {
s.defaultReadObject();
busy = 0;
cells = null;
base = Double.doubleToRawLongBits(s.readDouble());
}
}
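A corresponding sketch for the max-updater, assuming the com.twitter:jsr166e artifact exposes the same class (names illustrative):

import com.twitter.jsr166e.DoubleMaxUpdater;

public class PeakTracker {
    public static void main(String[] args) {
        DoubleMaxUpdater peakLatency = new DoubleMaxUpdater(); // starts at -Infinity
        peakLatency.update(12.5);
        peakLatency.update(3.0);  // below the current max, no effect
        System.out.println(peakLatency.max()); // 12.5
    }
}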

jsr166e/LongAdder.java (deleted)

@ -1,199 +0,0 @@
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
* http://creativecommons.org/publicdomain/zero/1.0/
*/
package jsr166e;
import java.util.concurrent.atomic.AtomicLong;
import java.io.Serializable;
/**
* One or more variables that together maintain an initially zero
* {@code long} sum. When updates (method {@link #add}) are contended
* across threads, the set of variables may grow dynamically to reduce
* contention. Method {@link #sum} (or, equivalently, {@link
* #longValue}) returns the current total combined across the
* variables maintaining the sum.
*
* <p>This class is usually preferable to {@link AtomicLong} when
* multiple threads update a common sum that is used for purposes such
* as collecting statistics, not for fine-grained synchronization
* control. Under low update contention, the two classes have similar
* characteristics. But under high contention, expected throughput of
* this class is significantly higher, at the expense of higher space
* consumption.
*
* <p>This class extends {@link Number}, but does <em>not</em> define
* methods such as {@code equals}, {@code hashCode} and {@code
* compareTo} because instances are expected to be mutated, and so are
* not useful as collection keys.
*
* <p><em>jsr166e note: This class is targeted to be placed in
* java.util.concurrent.atomic.</em>
*
* @since 1.8
* @author Doug Lea
*/
public class LongAdder extends Striped64 implements Serializable {
private static final long serialVersionUID = 7249069246863182397L;
/**
* Version of plus for use in retryUpdate
*/
final long fn(long v, long x) { return v + x; }
/**
* Creates a new adder with initial sum of zero.
*/
public LongAdder() {
}
/**
* Adds the given value.
*
* @param x the value to add
*/
public void add(long x) {
Cell[] as; long b, v; HashCode hc; Cell a; int n;
if ((as = cells) != null || !casBase(b = base, b + x)) {
boolean uncontended = true;
int h = (hc = threadHashCode.get()).code;
if (as == null || (n = as.length) < 1 ||
(a = as[(n - 1) & h]) == null ||
!(uncontended = a.cas(v = a.value, v + x)))
retryUpdate(x, hc, uncontended);
}
}
/**
* Equivalent to {@code add(1)}.
*/
public void increment() {
add(1L);
}
/**
* Equivalent to {@code add(-1)}.
*/
public void decrement() {
add(-1L);
}
/**
* Returns the current sum. The returned value is <em>NOT</em> an
* atomic snapshot; invocation in the absence of concurrent
* updates returns an accurate result, but concurrent updates that
* occur while the sum is being calculated might not be
* incorporated.
*
* @return the sum
*/
public long sum() {
long sum = base;
Cell[] as = cells;
if (as != null) {
int n = as.length;
for (int i = 0; i < n; ++i) {
Cell a = as[i];
if (a != null)
sum += a.value;
}
}
return sum;
}
/**
* Resets variables maintaining the sum to zero. This method may
* be a useful alternative to creating a new adder, but is only
* effective if there are no concurrent updates. Because this
* method is intrinsically racy, it should only be used when it is
* known that no threads are concurrently updating.
*/
public void reset() {
internalReset(0L);
}
/**
* Equivalent in effect to {@link #sum} followed by {@link
* #reset}. This method may apply for example during quiescent
* points between multithreaded computations. If there are
* updates concurrent with this method, the returned value is
* <em>not</em> guaranteed to be the final value occurring before
* the reset.
*
* @return the sum
*/
public long sumThenReset() {
long sum = base;
Cell[] as = cells;
base = 0L;
if (as != null) {
int n = as.length;
for (int i = 0; i < n; ++i) {
Cell a = as[i];
if (a != null) {
sum += a.value;
a.value = 0L;
}
}
}
return sum;
}
/**
* Returns the String representation of the {@link #sum}.
* @return the String representation of the {@link #sum}
*/
public String toString() {
return Long.toString(sum());
}
/**
* Equivalent to {@link #sum}.
*
* @return the sum
*/
public long longValue() {
return sum();
}
/**
* Returns the {@link #sum} as an {@code int} after a narrowing
* primitive conversion.
*/
public int intValue() {
return (int)sum();
}
/**
* Returns the {@link #sum} as a {@code float}
* after a widening primitive conversion.
*/
public float floatValue() {
return (float)sum();
}
/**
* Returns the {@link #sum} as a {@code double} after a widening
* primitive conversion.
*/
public double doubleValue() {
return (double)sum();
}
private void writeObject(java.io.ObjectOutputStream s)
throws java.io.IOException {
s.defaultWriteObject();
s.writeLong(sum());
}
private void readObject(java.io.ObjectInputStream s)
throws java.io.IOException, ClassNotFoundException {
s.defaultReadObject();
busy = 0;
cells = null;
base = s.readLong();
}
}
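The Javadoc's contention claim is the reason this class exists; a sketch contrasting it with AtomicLong for a statistics counter (names illustrative):

import java.util.concurrent.atomic.AtomicLong;
import com.twitter.jsr166e.LongAdder;

public class CounterChoice {
    // Every CAS from every thread targets this single memory location.
    static final AtomicLong atomicHits = new AtomicLong();
    // Contended increments spread over striped cells; sum() folds them together.
    static final LongAdder adderHits = new LongAdder();

    static void onHit() {
        atomicHits.incrementAndGet(); // hotspot under heavy write contention
        adderHits.increment();        // scales better for statistics counters
    }

    public static void main(String[] args) {
        for (int i = 0; i < 10; i++) onHit();
        System.out.println(atomicHits.get() + " " + adderHits.sum()); // 10 10
    }
}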

jsr166e/LongMaxUpdater.java (deleted)

@ -1,183 +0,0 @@
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
* http://creativecommons.org/publicdomain/zero/1.0/
*/
package jsr166e;
import java.io.Serializable;
/**
* One or more variables that together maintain a running {@code long}
* maximum with initial value {@code Long.MIN_VALUE}. When updates
* (method {@link #update}) are contended across threads, the set of
* variables may grow dynamically to reduce contention. Method {@link
* #max} (or, equivalently, {@link #longValue}) returns the current
* maximum across the variables maintaining updates.
*
* <p>This class extends {@link Number}, but does <em>not</em> define
* methods such as {@code equals}, {@code hashCode} and {@code
* compareTo} because instances are expected to be mutated, and so are
* not useful as collection keys.
*
* <p><em>jsr166e note: This class is targeted to be placed in
* java.util.concurrent.atomic.</em>
*
* @since 1.8
* @author Doug Lea
*/
public class LongMaxUpdater extends Striped64 implements Serializable {
private static final long serialVersionUID = 7249069246863182397L;
/**
* Version of max for use in retryUpdate
*/
final long fn(long v, long x) { return v > x ? v : x; }
/**
* Creates a new instance with initial maximum of {@code
* Long.MIN_VALUE}.
*/
public LongMaxUpdater() {
base = Long.MIN_VALUE;
}
/**
* Updates the maximum to be at least the given value.
*
* @param x the value to update
*/
public void update(long x) {
Cell[] as; long b, v; HashCode hc; Cell a; int n;
if ((as = cells) != null ||
(b = base) < x && !casBase(b, x)) {
boolean uncontended = true;
int h = (hc = threadHashCode.get()).code;
if (as == null || (n = as.length) < 1 ||
(a = as[(n - 1) & h]) == null ||
((v = a.value) < x && !(uncontended = a.cas(v, x))))
retryUpdate(x, hc, uncontended);
}
}
/**
* Returns the current maximum. The returned value is
* <em>NOT</em> an atomic snapshot; invocation in the absence of
* concurrent updates returns an accurate result, but concurrent
* updates that occur while the value is being calculated might
* not be incorporated.
*
* @return the maximum
*/
public long max() {
Cell[] as = cells;
long max = base;
if (as != null) {
int n = as.length;
long v;
for (int i = 0; i < n; ++i) {
Cell a = as[i];
if (a != null && (v = a.value) > max)
max = v;
}
}
return max;
}
/**
* Resets variables maintaining updates to {@code Long.MIN_VALUE}.
* This method may be a useful alternative to creating a new
* updater, but is only effective if there are no concurrent
* updates. Because this method is intrinsically racy, it should
* only be used when it is known that no threads are concurrently
* updating.
*/
public void reset() {
internalReset(Long.MIN_VALUE);
}
/**
* Equivalent in effect to {@link #max} followed by {@link
* #reset}. This method may apply for example during quiescent
* points between multithreaded computations. If there are
* updates concurrent with this method, the returned value is
* <em>not</em> guaranteed to be the final value occurring before
* the reset.
*
* @return the maximum
*/
public long maxThenReset() {
Cell[] as = cells;
long max = base;
base = Long.MIN_VALUE;
if (as != null) {
int n = as.length;
for (int i = 0; i < n; ++i) {
Cell a = as[i];
if (a != null) {
long v = a.value;
a.value = Long.MIN_VALUE;
if (v > max)
max = v;
}
}
}
return max;
}
/**
* Returns the String representation of the {@link #max}.
* @return the String representation of the {@link #max}
*/
public String toString() {
return Long.toString(max());
}
/**
* Equivalent to {@link #max}.
*
* @return the maximum
*/
public long longValue() {
return max();
}
/**
* Returns the {@link #max} as an {@code int} after a narrowing
* primitive conversion.
*/
public int intValue() {
return (int)max();
}
/**
* Returns the {@link #max} as a {@code float}
* after a widening primitive conversion.
*/
public float floatValue() {
return (float)max();
}
/**
* Returns the {@link #max} as a {@code double} after a widening
* primitive conversion.
*/
public double doubleValue() {
return (double)max();
}
private void writeObject(java.io.ObjectOutputStream s)
throws java.io.IOException {
s.defaultWriteObject();
s.writeLong(max());
}
private void readObject(java.io.ObjectInputStream s)
throws java.io.IOException, ClassNotFoundException {
s.defaultReadObject();
busy = 0;
cells = null;
base = s.readLong();
}
}

jsr166e/Striped64.java (deleted)

@ -1,341 +0,0 @@
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
* http://creativecommons.org/publicdomain/zero/1.0/
*/
package jsr166e;
import java.util.Random;
/**
* A package-local class holding common representation and mechanics
* for classes supporting dynamic striping on 64bit values. The class
* extends Number so that concrete subclasses must publicly do so.
*/
abstract class Striped64 extends Number {
/*
* This class maintains a lazily-initialized table of atomically
* updated variables, plus an extra "base" field. The table size
* is a power of two. Indexing uses masked per-thread hash codes.
* Nearly all declarations in this class are package-private,
* accessed directly by subclasses.
*
* Table entries are of class Cell; a variant of AtomicLong padded
* to reduce cache contention on most processors. Padding is
* overkill for most Atomics because they are usually irregularly
* scattered in memory and thus don't interfere much with each
* other. But Atomic objects residing in arrays will tend to be
* placed adjacent to each other, and so will most often share
* cache lines (with a huge negative performance impact) without
* this precaution.
*
* In part because Cells are relatively large, we avoid creating
* them until they are needed. When there is no contention, all
* updates are made to the base field. Upon first contention (a
* failed CAS on base update), the table is initialized to size 2.
* The table size is doubled upon further contention until
* reaching the nearest power of two greater than or equal to the
* number of CPUS. Table slots remain empty (null) until they are
* needed.
*
* A single spinlock ("busy") is used for initializing and
* resizing the table, as well as populating slots with new Cells.
* There is no need for a blocking lock; when the lock is not
* available, threads try other slots (or the base). During these
* retries, there is increased contention and reduced locality,
* which is still better than alternatives.
*
* Per-thread hash codes are initialized to random values.
* Contention and/or table collisions are indicated by failed
* CASes when performing an update operation (see method
* retryUpdate). Upon a collision, if the table size is less than
* the capacity, it is doubled in size unless some other thread
* holds the lock. If a hashed slot is empty, and lock is
* available, a new Cell is created. Otherwise, if the slot
* exists, a CAS is tried. Retries proceed by "double hashing",
* using a secondary hash (Marsaglia XorShift) to try to find a
* free slot.
*
* The table size is capped because, when there are more threads
* than CPUs, supposing that each thread were bound to a CPU,
* there would exist a perfect hash function mapping threads to
* slots that eliminates collisions. When we reach capacity, we
* search for this mapping by randomly varying the hash codes of
* colliding threads. Because search is random, and collisions
* only become known via CAS failures, convergence can be slow,
* and because threads are typically not bound to CPUS forever,
* may not occur at all. However, despite these limitations,
* observed contention rates are typically low in these cases.
*
* It is possible for a Cell to become unused when threads that
* once hashed to it terminate, as well as in the case where
* doubling the table causes no thread to hash to it under
* expanded mask. We do not try to detect or remove such cells,
* under the assumption that for long-running instances, observed
* contention levels will recur, so the cells will eventually be
* needed again; and for short-lived ones, it does not matter.
*/
/**
* Padded variant of AtomicLong supporting only raw accesses plus CAS.
* The value field is placed between pads, hoping that the JVM doesn't
* reorder them.
*
* JVM intrinsics note: It would be possible to use a release-only
* form of CAS here, if it were provided.
*/
static final class Cell {
volatile long p0, p1, p2, p3, p4, p5, p6;
volatile long value;
volatile long q0, q1, q2, q3, q4, q5, q6;
Cell(long x) { value = x; }
final boolean cas(long cmp, long val) {
return UNSAFE.compareAndSwapLong(this, valueOffset, cmp, val);
}
// Unsafe mechanics
private static final sun.misc.Unsafe UNSAFE;
private static final long valueOffset;
static {
try {
UNSAFE = getUnsafe();
Class<?> ak = Cell.class;
valueOffset = UNSAFE.objectFieldOffset
(ak.getDeclaredField("value"));
} catch (Exception e) {
throw new Error(e);
}
}
}
/**
* Holder for the thread-local hash code. The code is initially
* random, but may be set to a different value upon collisions.
*/
static final class HashCode {
static final Random rng = new Random();
int code;
HashCode() {
int h = rng.nextInt(); // Avoid zero to allow xorShift rehash
code = (h == 0) ? 1 : h;
}
}
/**
* The corresponding ThreadLocal class
*/
static final class ThreadHashCode extends ThreadLocal<HashCode> {
public HashCode initialValue() { return new HashCode(); }
}
/**
* Static per-thread hash codes. Shared across all instances to
* reduce ThreadLocal pollution and because adjustments due to
* collisions in one table are likely to be appropriate for
* others.
*/
static final ThreadHashCode threadHashCode = new ThreadHashCode();
/** Number of CPUS, to place bound on table size */
static final int NCPU = Runtime.getRuntime().availableProcessors();
/**
* Table of cells. When non-null, size is a power of 2.
*/
transient volatile Cell[] cells;
/**
* Base value, used mainly when there is no contention, but also as
* a fallback during table initialization races. Updated via CAS.
*/
transient volatile long base;
/**
* Spinlock (locked via CAS) used when resizing and/or creating Cells.
*/
transient volatile int busy;
/**
* Package-private default constructor
*/
Striped64() {
}
/**
* CASes the base field.
*/
final boolean casBase(long cmp, long val) {
return UNSAFE.compareAndSwapLong(this, baseOffset, cmp, val);
}
/**
* CASes the busy field from 0 to 1 to acquire lock.
*/
final boolean casBusy() {
return UNSAFE.compareAndSwapInt(this, busyOffset, 0, 1);
}
/**
* Computes the function of current and new value. Subclasses
* should open-code this update function for most uses, but the
* virtualized form is needed within retryUpdate.
*
* @param currentValue the current value (of either base or a cell)
* @param newValue the argument from a user update call
* @return result of the update function
*/
abstract long fn(long currentValue, long newValue);
/**
* Handles cases of updates involving initialization, resizing,
* creating new Cells, and/or contention. See above for
* explanation. This method suffers the usual non-modularity
* problems of optimistic retry code, relying on rechecked sets of
* reads.
*
* @param x the value
* @param hc the hash code holder
* @param wasUncontended false if CAS failed before call
*/
final void retryUpdate(long x, HashCode hc, boolean wasUncontended) {
int h = hc.code;
boolean collide = false; // True if last slot nonempty
for (;;) {
Cell[] as; Cell a; int n; long v;
if ((as = cells) != null && (n = as.length) > 0) {
if ((a = as[(n - 1) & h]) == null) {
if (busy == 0) { // Try to attach new Cell
Cell r = new Cell(x); // Optimistically create
if (busy == 0 && casBusy()) {
boolean created = false;
try { // Recheck under lock
Cell[] rs; int m, j;
if ((rs = cells) != null &&
(m = rs.length) > 0 &&
rs[j = (m - 1) & h] == null) {
rs[j] = r;
created = true;
}
} finally {
busy = 0;
}
if (created)
break;
continue; // Slot is now non-empty
}
}
collide = false;
}
else if (!wasUncontended) // CAS already known to fail
wasUncontended = true; // Continue after rehash
else if (a.cas(v = a.value, fn(v, x)))
break;
else if (n >= NCPU || cells != as)
collide = false; // At max size or stale
else if (!collide)
collide = true;
else if (busy == 0 && casBusy()) {
try {
if (cells == as) { // Expand table unless stale
Cell[] rs = new Cell[n << 1];
for (int i = 0; i < n; ++i)
rs[i] = as[i];
cells = rs;
}
} finally {
busy = 0;
}
collide = false;
continue; // Retry with expanded table
}
h ^= h << 13; // Rehash
h ^= h >>> 17;
h ^= h << 5;
}
else if (busy == 0 && cells == as && casBusy()) {
boolean init = false;
try { // Initialize table
if (cells == as) {
Cell[] rs = new Cell[2];
rs[h & 1] = new Cell(x);
cells = rs;
init = true;
}
} finally {
busy = 0;
}
if (init)
break;
}
else if (casBase(v = base, fn(v, x)))
break; // Fall back on using base
}
hc.code = h; // Record index for next time
}
/**
* Sets base and all cells to the given value.
*/
final void internalReset(long initialValue) {
Cell[] as = cells;
base = initialValue;
if (as != null) {
int n = as.length;
for (int i = 0; i < n; ++i) {
Cell a = as[i];
if (a != null)
a.value = initialValue;
}
}
}
// Unsafe mechanics
private static final sun.misc.Unsafe UNSAFE;
private static final long baseOffset;
private static final long busyOffset;
static {
try {
UNSAFE = getUnsafe();
Class<?> sk = Striped64.class;
baseOffset = UNSAFE.objectFieldOffset
(sk.getDeclaredField("base"));
busyOffset = UNSAFE.objectFieldOffset
(sk.getDeclaredField("busy"));
} catch (Exception e) {
throw new Error(e);
}
}
/**
* Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
* Replace with a simple call to Unsafe.getUnsafe when integrating
* into a jdk.
*
* @return a sun.misc.Unsafe
*/
private static sun.misc.Unsafe getUnsafe() {
try {
return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException tryReflectionInstead) {}
try {
return java.security.AccessController.doPrivileged
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
}});
} catch (java.security.PrivilegedActionException e) {
throw new RuntimeException("Could not initialize intrinsics",
e.getCause());
}
}
}
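As the jsr166e note above says, these classes were targeted for java.util.concurrent.atomic, and the pom comment "remove this for java 8" anticipates the switch: on Java 8+ the JDK ships Doug Lea's same Striped64 design. A sketch of the drop-in JDK equivalent:

// Java 8+ equivalent of the backported classes above:
import java.util.concurrent.atomic.LongAdder;

public class Jdk8Counter {
    public static void main(String[] args) {
        LongAdder counter = new LongAdder();
        counter.increment();
        counter.add(41);
        System.out.println(counter.sum()); // 42
    }
}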

ClusterStatsIndices.java

@ -140,7 +140,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
docs = DocsStats.readDocStats(in);
store = StoreStats.readStoreStats(in);
fieldData = FieldDataStats.readFieldDataStats(in);
-queryCache = QueryCacheStats.readFilterCacheStats(in);
+queryCache = QueryCacheStats.readQueryCacheStats(in);
completion = CompletionStats.readCompletionStats(in);
segments = SegmentsStats.readSegmentsStats(in);
percolate = PercolateStats.readPercolateStats(in);

CommonStats.java

@ -539,7 +539,7 @@ public class CommonStats implements Streamable, ToXContent {
warmer = WarmerStats.readWarmerStats(in);
}
if (in.readBoolean()) {
-queryCache = QueryCacheStats.readFilterCacheStats(in);
+queryCache = QueryCacheStats.readQueryCacheStats(in);
}
if (in.readBoolean()) {
fieldData = FieldDataStats.readFieldDataStats(in);

org/elasticsearch/bootstrap/Bootstrap.java

@ -19,6 +19,9 @@
package org.elasticsearch.bootstrap;
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Lists;
import org.apache.lucene.util.StringHelper;
import org.apache.lucene.util.Constants;
import org.elasticsearch.ExceptionsHelper;
@ -29,6 +32,7 @@ import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.CreationException;
import org.elasticsearch.common.inject.spi.Message;
+import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
@ -42,10 +46,20 @@ import org.elasticsearch.node.NodeBuilder;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.hyperic.sigar.Sigar;
import java.io.IOException;
import java.lang.reflect.Method;
import java.net.URL;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.PathMatcher;
import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import static org.elasticsearch.common.io.FileSystemUtils.isAccessibleDirectory;
import static com.google.common.collect.Sets.newHashSet;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
@ -162,6 +176,12 @@ public class Bootstrap {
});
}
+// install any plugins into classpath
+setupPlugins(environment);
+// look for jar hell
+JarHell.checkJarHell();
// install SM after natives, shutdown hooks, etc.
setupSecurity(settings, environment);
@ -348,4 +368,76 @@ public class Bootstrap {
}
return errorMessage.toString();
}
static final String PLUGIN_LIB_PATTERN = "glob:**.{jar,zip}";
private static void setupPlugins(Environment environment) throws IOException {
ESLogger logger = Loggers.getLogger(Bootstrap.class);
Path pluginsDirectory = environment.pluginsFile();
if (!isAccessibleDirectory(pluginsDirectory, logger)) {
return;
}
// note: there's only one classloader here, but Uwe gets upset otherwise.
ClassLoader classLoader = Bootstrap.class.getClassLoader();
Class<?> classLoaderClass = classLoader.getClass();
Method addURL = null;
while (!classLoaderClass.equals(Object.class)) {
try {
addURL = classLoaderClass.getDeclaredMethod("addURL", URL.class);
addURL.setAccessible(true);
break;
} catch (NoSuchMethodException e) {
// no method, try the parent
classLoaderClass = classLoaderClass.getSuperclass();
}
}
if (addURL == null) {
logger.debug("failed to find addURL method on classLoader [" + classLoader + "] to add methods");
return;
}
try (DirectoryStream<Path> stream = Files.newDirectoryStream(pluginsDirectory)) {
for (Path plugin : stream) {
// We check that subdirs are directories and readable
if (!isAccessibleDirectory(plugin, logger)) {
continue;
}
logger.trace("--- adding plugin [{}]", plugin.toAbsolutePath());
try {
// add the root
addURL.invoke(classLoader, plugin.toUri().toURL());
// gather files to add
List<Path> libFiles = Lists.newArrayList();
libFiles.addAll(Arrays.asList(files(plugin)));
Path libLocation = plugin.resolve("lib");
if (Files.isDirectory(libLocation)) {
libFiles.addAll(Arrays.asList(files(libLocation)));
}
PathMatcher matcher = PathUtils.getDefaultFileSystem().getPathMatcher(PLUGIN_LIB_PATTERN);
// if there are jars in it, add it as well
for (Path libFile : libFiles) {
if (!matcher.matches(libFile)) {
continue;
}
addURL.invoke(classLoader, libFile.toUri().toURL());
}
} catch (Throwable e) {
logger.warn("failed to add plugin [" + plugin + "]", e);
}
}
}
}
private static Path[] files(Path from) throws IOException {
try (DirectoryStream<Path> stream = Files.newDirectoryStream(from)) {
return Iterators.toArray(stream.iterator(), Path.class);
}
}
}
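The PLUGIN_LIB_PATTERN glob above decides which plugin files end up on the classpath. A standalone sketch of the same matcher (using the JDK default filesystem instead of PathUtils; the paths are hypothetical):

import java.nio.file.FileSystems;
import java.nio.file.PathMatcher;
import java.nio.file.Paths;

public class GlobCheck {
    public static void main(String[] args) {
        PathMatcher matcher = FileSystems.getDefault().getPathMatcher("glob:**.{jar,zip}");
        // ** crosses directory boundaries, so any .jar/.zip under the plugin matches
        System.out.println(matcher.matches(Paths.get("plugins/demo/lib/demo-1.0.jar")));  // true
        System.out.println(matcher.matches(Paths.get("plugins/demo/plugin.properties"))); // false
    }
}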

org/elasticsearch/bootstrap/JarHell.java (new file)

@ -0,0 +1,131 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.bootstrap;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import java.io.IOException;
import java.net.URL;
import java.net.URLClassLoader;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;
import java.util.jar.Manifest;
/** Simple check for duplicate class files across the classpath */
class JarHell {
/**
* Checks the current classloader for duplicate classes
* @throws IllegalStateException if jar hell was found
*/
@SuppressForbidden(reason = "needs JarFile for speed, just reading entries")
static void checkJarHell() throws Exception {
ClassLoader loader = JarHell.class.getClassLoader();
if (loader instanceof URLClassLoader == false) {
return;
}
final Map<String,URL> clazzes = new HashMap<>(32768);
Set<String> seenJars = new HashSet<>();
for (final URL url : ((URLClassLoader)loader).getURLs()) {
String path = url.getPath();
if (path.endsWith(".jar")) {
if (!seenJars.add(path)) {
continue; // we can't fail because of sheistiness with joda-time
}
try (JarFile file = new JarFile(url.getPath())) {
Manifest manifest = file.getManifest();
if (manifest != null) {
// inspect Manifest: give a nice error if jar requires a newer java version
String systemVersion = System.getProperty("java.specification.version");
String targetVersion = manifest.getMainAttributes().getValue("X-Compile-Target-JDK");
if (targetVersion != null) {
float current = Float.POSITIVE_INFINITY;
float target = Float.NEGATIVE_INFINITY;
try {
current = Float.parseFloat(systemVersion);
target = Float.parseFloat(targetVersion);
} catch (NumberFormatException e) {
// some spec changed, time for a more complex parser
}
if (current < target) {
throw new IllegalStateException(path + " requires Java " + targetVersion
+ ", your system: " + systemVersion);
}
}
}
// inspect entries
Enumeration<JarEntry> elements = file.entries();
while (elements.hasMoreElements()) {
String entry = elements.nextElement().getName();
if (entry.endsWith(".class")) {
// for jar format, the separator is defined as /
entry = entry.replace('/', '.').substring(0, entry.length() - 6);
checkClass(clazzes, entry, url);
}
}
}
} else {
// case for tests: where we have class files in the classpath
final Path root = PathUtils.get(url.toURI());
final String sep = root.getFileSystem().getSeparator();
Files.walkFileTree(root, new SimpleFileVisitor<Path>() {
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
String entry = root.relativize(file).toString();
if (entry.endsWith(".class")) {
// normalize with the os separator
entry = entry.replace(sep, ".").substring(0, entry.length() - 6);
checkClass(clazzes, entry, url);
}
return super.visitFile(file, attrs);
}
});
}
}
}
@SuppressForbidden(reason = "proper use of URL to reduce noise")
static void checkClass(Map<String,URL> clazzes, String clazz, URL url) {
if (clazz.startsWith("org.apache.log4j")) {
return; // go figure, jar hell for what should be System.out.println...
}
if (clazz.equals("org.joda.time.base.BaseDateTime")) {
return; // apparently this is intentional... clean this up
}
URL previous = clazzes.put(clazz, url);
if (previous != null) {
throw new IllegalStateException("jar hell!" + System.lineSeparator() +
"class: " + clazz + System.lineSeparator() +
"jar1: " + previous.getPath() + System.lineSeparator() +
"jar2: " + url.getPath());
}
}
}

Security.java

@ -23,6 +23,8 @@ import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.env.Environment;
import java.io.*;
import java.net.URL;
import java.net.URLClassLoader;
import java.nio.file.AccessMode;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Files;
@ -30,6 +32,10 @@ import java.nio.file.NotDirectoryException;
import java.nio.file.Path;
import java.security.Permissions;
import java.security.Policy;
import java.util.Collections;
import java.util.IdentityHashMap;
import java.util.Map;
import java.util.regex.Pattern;
/**
* Initializes securitymanager with necessary permissions.
@ -44,6 +50,9 @@ final class Security {
* Can only happen once!
*/
static void configure(Environment environment) throws Exception {
+// set properties for jar locations
+setCodebaseProperties();
// enable security policy: union of template and environment-based paths.
Policy.setPolicy(new ESPolicy(createPermissions(environment)));
@ -54,6 +63,50 @@ final class Security {
selfTest();
}
// mapping of jars to codebase properties
// note that this is only read once, when policy is parsed.
private static final Map<Pattern,String> SPECIAL_JARS;
static {
Map<Pattern,String> m = new IdentityHashMap<>();
m.put(Pattern.compile(".*lucene-core-.*\\.jar$"), "es.security.jar.lucene.core");
m.put(Pattern.compile(".*jsr166e-.*\\.jar$"), "es.security.jar.twitter.jsr166e");
m.put(Pattern.compile(".*securemock-.*\\.jar$"), "es.security.jar.elasticsearch.securemock");
SPECIAL_JARS = Collections.unmodifiableMap(m);
}
/**
* Sets properties (codebase URLs) for policy files.
* JAR locations are not fixed so we have to find the locations of
* the ones we want.
*/
@SuppressForbidden(reason = "proper use of URL")
static void setCodebaseProperties() {
ClassLoader loader = Security.class.getClassLoader();
if (loader instanceof URLClassLoader) {
for (URL url : ((URLClassLoader)loader).getURLs()) {
for (Map.Entry<Pattern,String> e : SPECIAL_JARS.entrySet()) {
if (e.getKey().matcher(url.getPath()).matches()) {
String prop = e.getValue();
// TODO: we need to fix plugins to not include duplicate e.g. lucene-core jars,
// to add back this safety check! see https://github.com/elastic/elasticsearch/issues/11647
// if (System.getProperty(prop) != null) {
// throw new IllegalStateException("property: " + prop + " is unexpectedly set: " + System.getProperty(prop));
//}
System.setProperty(prop, url.toString());
}
}
}
for (String prop : SPECIAL_JARS.values()) {
if (System.getProperty(prop) == null) {
System.setProperty(prop, "file:/dev/null"); // no chance to be interpreted as "all"
}
}
} else {
// we could try to parse the classpath or something, but screw it for now.
throw new UnsupportedOperationException("Unsupported system classloader type: " + loader.getClass());
}
}
/** returns dynamic Permissions to configured paths */
static Permissions createPermissions(Environment environment) throws IOException {
// TODO: improve test infra so we can reduce permissions where read/write

ClusterStateObserver.java

@ -120,7 +120,7 @@ public class ClusterStateObserver {
timeoutTimeLeftMS = timeOutValue.millis() - timeSinceStartMS;
if (timeoutTimeLeftMS <= 0l) {
// things have timeout while we were busy -> notify
logger.debug("observer timed out. notifying listener. timeout setting [{}], time since start [{}]", timeOutValue, new TimeValue(timeSinceStartMS));
logger.trace("observer timed out. notifying listener. timeout setting [{}], time since start [{}]", timeOutValue, new TimeValue(timeSinceStartMS));
// update to latest, in case people want to retry
timedOut = true;
lastObservedState.set(new ObservedState(clusterService.state()));
@ -232,7 +232,7 @@ public class ClusterStateObserver {
if (context != null) {
clusterService.remove(this);
long timeSinceStartMS = TimeValue.nsecToMSec(System.nanoTime() - startTimeNS);
logger.debug("observer: timeout notification from cluster service. timeout setting [{}], time since start [{}]", timeOutValue, new TimeValue(timeSinceStartMS));
logger.trace("observer: timeout notification from cluster service. timeout setting [{}], time since start [{}]", timeOutValue, new TimeValue(timeSinceStartMS));
// update to latest, in case people want to retry
lastObservedState.set(new ObservedState(clusterService.state()));
timedOut = true;

(a metrics class under org/elasticsearch/common/metrics/)

@ -19,7 +19,7 @@
package org.elasticsearch.common.metrics;
-import jsr166e.LongAdder;
+import com.twitter.jsr166e.LongAdder;
/**
*/

(a metrics class under org/elasticsearch/common/metrics/)

@ -19,7 +19,7 @@
package org.elasticsearch.common.metrics;
-import jsr166e.LongAdder;
+import com.twitter.jsr166e.LongAdder;
import java.util.concurrent.TimeUnit;

(a metrics class under org/elasticsearch/common/metrics/)

@ -19,7 +19,7 @@
package org.elasticsearch.common.metrics;
-import jsr166e.LongAdder;
+import com.twitter.jsr166e.LongAdder;
/**
*/

(a metrics class under org/elasticsearch/common/metrics/)

@ -19,7 +19,7 @@
package org.elasticsearch.common.metrics;
-import jsr166e.LongAdder;
+import com.twitter.jsr166e.LongAdder;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import java.util.concurrent.ScheduledExecutorService;

org/elasticsearch/index/cache/IndexCache.java

@ -19,7 +19,6 @@
package org.elasticsearch.index.cache;
import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
@ -38,14 +37,12 @@ import java.io.IOException;
public class IndexCache extends AbstractIndexComponent implements Closeable {
private final QueryCache queryCache;
-private final QueryCachingPolicy queryCachingPolicy;
private final BitsetFilterCache bitsetFilterCache;
@Inject
-public IndexCache(Index index, @IndexSettings Settings indexSettings, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy, BitsetFilterCache bitsetFilterCache) {
+public IndexCache(Index index, @IndexSettings Settings indexSettings, QueryCache queryCache, BitsetFilterCache bitsetFilterCache) {
super(index, indexSettings);
this.queryCache = queryCache;
-this.queryCachingPolicy = queryCachingPolicy;
this.bitsetFilterCache = bitsetFilterCache;
}
@ -53,10 +50,6 @@ public class IndexCache extends AbstractIndexComponent implements Closeable {
return queryCache;
}
-public QueryCachingPolicy queryPolicy() {
-return queryCachingPolicy;
-}
/**
* Return the {@link BitsetFilterCache} for this index.
*/

org/elasticsearch/index/cache/query/QueryCacheModule.java

@ -19,8 +19,6 @@
package org.elasticsearch.index.cache.query;
-import org.apache.lucene.search.QueryCachingPolicy;
-import org.apache.lucene.search.UsageTrackingQueryCachingPolicy;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.Scopes;
import org.elasticsearch.common.settings.Settings;
@ -31,10 +29,10 @@ import org.elasticsearch.index.cache.query.index.IndexQueryCache;
*/
public class QueryCacheModule extends AbstractModule {
-public static final class FilterCacheSettings {
-public static final String FILTER_CACHE_TYPE = "index.queries.cache.type";
+public static final class QueryCacheSettings {
+public static final String QUERY_CACHE_TYPE = "index.queries.cache.type";
// for test purposes only
-public static final String FILTER_CACHE_EVERYTHING = "index.queries.cache.everything";
+public static final String QUERY_CACHE_EVERYTHING = "index.queries.cache.everything";
}
private final Settings settings;
@ -46,15 +44,7 @@ public class QueryCacheModule extends AbstractModule {
@Override
protected void configure() {
bind(QueryCache.class)
-.to(settings.getAsClass(FilterCacheSettings.FILTER_CACHE_TYPE, IndexQueryCache.class, "org.elasticsearch.index.cache.query.", "FilterCache"))
+.to(settings.getAsClass(QueryCacheSettings.QUERY_CACHE_TYPE, IndexQueryCache.class, "org.elasticsearch.index.cache.query.", "QueryCache"))
.in(Scopes.SINGLETON);
-// the query cache is a node-level thing, however we want the most popular queries
-// to be computed on a per-index basis, that is why we don't use the SINGLETON
-// scope below
-if (settings.getAsBoolean(FilterCacheSettings.FILTER_CACHE_EVERYTHING, false)) {
-bind(QueryCachingPolicy.class).toInstance(QueryCachingPolicy.ALWAYS_CACHE);
-} else {
-bind(QueryCachingPolicy.class).toInstance(new UsageTrackingQueryCachingPolicy());
-}
}
}

QueryCacheStats.java

@ -108,7 +108,7 @@ public class QueryCacheStats implements Streamable, ToXContent {
return cacheCount - cacheSize;
}
-public static QueryCacheStats readFilterCacheStats(StreamInput in) throws IOException {
+public static QueryCacheStats readQueryCacheStats(StreamInput in) throws IOException {
QueryCacheStats stats = new QueryCacheStats();
stats.readFrom(in);
return stats;

org/elasticsearch/index/cache/query/index/IndexQueryCache.java

@ -36,12 +36,12 @@ import org.elasticsearch.indices.cache.query.IndicesQueryCache;
*/
public class IndexQueryCache extends AbstractIndexComponent implements QueryCache {
-final IndicesQueryCache indicesFilterCache;
+final IndicesQueryCache indicesQueryCache;
@Inject
-public IndexQueryCache(Index index, @IndexSettings Settings indexSettings, IndicesQueryCache indicesFilterCache) {
+public IndexQueryCache(Index index, @IndexSettings Settings indexSettings, IndicesQueryCache indicesQueryCache) {
super(index, indexSettings);
-this.indicesFilterCache = indicesFilterCache;
+this.indicesQueryCache = indicesQueryCache;
}
@Override
@ -52,12 +52,12 @@ public class IndexQueryCache extends AbstractIndexComponent implements QueryCache {
@Override
public void clear(String reason) {
logger.debug("full cache clear, reason [{}]", reason);
-indicesFilterCache.clearIndex(index.getName());
+indicesQueryCache.clearIndex(index.getName());
}
@Override
public Weight doCache(Weight weight, QueryCachingPolicy policy) {
-return indicesFilterCache.doCache(weight, policy);
+return indicesQueryCache.doCache(weight, policy);
}
}

NestedQueryParser.java

@ -86,6 +86,8 @@ public class NestedQueryParser extends BaseQueryParserTemp {
String sScoreMode = parser.text();
if ("avg".equals(sScoreMode)) {
scoreMode = ScoreMode.Avg;
} else if ("min".equals(sScoreMode)) {
scoreMode = ScoreMode.Min;
} else if ("max".equals(sScoreMode)) {
scoreMode = ScoreMode.Max;
} else if ("total".equals(sScoreMode) || "sum".equals(sScoreMode)) {

IndexShard.java

@ -24,6 +24,8 @@ import com.google.common.base.Preconditions;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.index.CheckIndex;
+import org.apache.lucene.search.QueryCachingPolicy;
+import org.apache.lucene.search.UsageTrackingQueryCachingPolicy;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.ThreadInterruptedException;
@ -59,6 +61,7 @@ import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.aliases.IndexAliasesService;
import org.elasticsearch.index.cache.IndexCache;
import org.elasticsearch.index.cache.bitset.ShardBitsetFilterCache;
+import org.elasticsearch.index.cache.query.QueryCacheModule.QueryCacheSettings;
import org.elasticsearch.index.cache.query.QueryCacheStats;
import org.elasticsearch.index.cache.request.ShardRequestCache;
import org.elasticsearch.index.codec.CodecService;
@ -155,7 +158,7 @@ public class IndexShard extends AbstractIndexShardComponent {
private final EngineConfig engineConfig;
private final TranslogConfig translogConfig;
private final MergePolicyConfig mergePolicyConfig;
-private final IndicesQueryCache indicesFilterCache;
+private final IndicesQueryCache indicesQueryCache;
private final StoreRecoveryService storeRecoveryService;
private TimeValue refreshInterval;
@ -192,7 +195,7 @@ public class IndexShard extends AbstractIndexShardComponent {
@Inject
public IndexShard(ShardId shardId, IndexSettingsService indexSettingsService, IndicesLifecycle indicesLifecycle, Store store, StoreRecoveryService storeRecoveryService,
ThreadPool threadPool, MapperService mapperService, IndexQueryParserService queryParserService, IndexCache indexCache, IndexAliasesService indexAliasesService,
-IndicesQueryCache indicesFilterCache, ShardPercolateService shardPercolateService, CodecService codecService,
+IndicesQueryCache indicesQueryCache, ShardPercolateService shardPercolateService, CodecService codecService,
ShardTermVectorsService termVectorsService, IndexFieldDataService indexFieldDataService, IndexService indexService,
@Nullable IndicesWarmer warmer, SnapshotDeletionPolicy deletionPolicy, SimilarityService similarityService, EngineFactory factory,
ClusterService clusterService, ShardPath path, BigArrays bigArrays) {
@ -219,7 +222,7 @@ public class IndexShard extends AbstractIndexShardComponent {
this.termVectorsService = termVectorsService.setIndexShard(this);
this.searchService = new ShardSearchStats(indexSettings);
this.shardWarmerService = new ShardIndexWarmerService(shardId, indexSettings);
-this.indicesFilterCache = indicesFilterCache;
+this.indicesQueryCache = indicesQueryCache;
this.shardQueryCache = new ShardRequestCache(shardId, indexSettings);
this.shardFieldData = new ShardFieldData();
this.percolatorQueriesRegistry = new PercolatorQueriesRegistry(shardId, indexSettings, queryParserService, indexingService, indicesLifecycle, mapperService, indexFieldDataService, shardPercolateService);
@ -242,8 +245,15 @@ public class IndexShard extends AbstractIndexShardComponent {
this.checkIndexOnStartup = indexSettings.get("index.shard.check_on_startup", "false");
this.translogConfig = new TranslogConfig(shardId, shardPath().resolveTranslog(), indexSettings, getFromSettings(logger, indexSettings, Translog.Durabilty.REQUEST),
bigArrays, threadPool);
-this.engineConfig = newEngineConfig(translogConfig);
+final QueryCachingPolicy cachingPolicy;
+// the query cache is a node-level thing, however we want the most popular filters
+// to be computed on a per-shard basis
+if (indexSettings.getAsBoolean(QueryCacheSettings.QUERY_CACHE_EVERYTHING, false)) {
+cachingPolicy = QueryCachingPolicy.ALWAYS_CACHE;
+} else {
+cachingPolicy = new UsageTrackingQueryCachingPolicy();
+}
+this.engineConfig = newEngineConfig(translogConfig, cachingPolicy);
this.indexShardOperationCounter = new IndexShardOperationCounter(logger, shardId);
}
@ -313,6 +323,10 @@ public class IndexShard extends AbstractIndexShardComponent {
return this.shardRouting;
}
+public QueryCachingPolicy getQueryCachingPolicy() {
+return this.engineConfig.getQueryCachingPolicy();
+}
/**
* Updates the shards routing entry. This mutate the shards internal state depending
* on the changes that get introduced by the new routing value. This method will persist shard level metadata
@ -616,7 +630,7 @@ public class IndexShard extends AbstractIndexShardComponent {
}
public QueryCacheStats queryCacheStats() {
-return indicesFilterCache.getStats(shardId);
+return indicesQueryCache.getStats(shardId);
}
public FieldDataStats fieldDataStats(String... fields) {
@ -1336,7 +1350,7 @@ public class IndexShard extends AbstractIndexShardComponent {
return mapperService.documentMapperWithAutoCreate(type);
}
-private final EngineConfig newEngineConfig(TranslogConfig translogConfig) {
+private final EngineConfig newEngineConfig(TranslogConfig translogConfig, QueryCachingPolicy cachingPolicy) {
final TranslogRecoveryPerformer translogRecoveryPerformer = new TranslogRecoveryPerformer(shardId, mapperService, queryParserService, indexAliasesService, indexCache) {
@Override
protected void operationProcessed() {
@ -1346,7 +1360,7 @@ public class IndexShard extends AbstractIndexShardComponent {
};
return new EngineConfig(shardId,
threadPool, indexingService, indexSettingsService.indexSettings(), warmer, store, deletionPolicy, mergePolicyConfig.getMergePolicy(), mergeSchedulerConfig,
-mapperService.indexAnalyzer(), similarityService.similarity(), codecService, failedEngineListener, translogRecoveryPerformer, indexCache.query(), indexCache.queryPolicy(), translogConfig);
+mapperService.indexAnalyzer(), similarityService.similarity(), codecService, failedEngineListener, translogRecoveryPerformer, indexCache.query(), cachingPolicy, translogConfig);
}
private static class IndexShardOperationCounter extends AbstractRefCounted {

ShadowIndexShard.java

@ -60,7 +60,7 @@ public final class ShadowIndexShard extends IndexShard {
IndicesLifecycle indicesLifecycle, Store store, StoreRecoveryService storeRecoveryService,
ThreadPool threadPool, MapperService mapperService,
IndexQueryParserService queryParserService, IndexCache indexCache,
-IndexAliasesService indexAliasesService, IndicesQueryCache indicesFilterCache,
+IndexAliasesService indexAliasesService, IndicesQueryCache indicesQueryCache,
ShardPercolateService shardPercolateService, CodecService codecService,
ShardTermVectorsService termVectorsService, IndexFieldDataService indexFieldDataService,
IndexService indexService, @Nullable IndicesWarmer warmer,
@ -69,7 +69,7 @@ public final class ShadowIndexShard extends IndexShard {
ShardPath path, BigArrays bigArrays) throws IOException {
super(shardId, indexSettingsService, indicesLifecycle, store, storeRecoveryService,
threadPool, mapperService, queryParserService, indexCache, indexAliasesService,
-indicesFilterCache, shardPercolateService, codecService,
+indicesQueryCache, shardPercolateService, codecService,
termVectorsService, indexFieldDataService, indexService,
warmer, deletionPolicy, similarityService,
factory, clusterService, path, bigArrays);

View File

@ -233,8 +233,9 @@ public abstract class TranslogReader implements Closeable, Comparable<TranslogRe
BytesRef ref = new BytesRef(len);
ref.length = len;
headerStream.read(ref.bytes, ref.offset, ref.length);
if (ref.utf8ToString().equals(translogUUID) == false) {
throw new TranslogCorruptedException("expected shard UUID [" + translogUUID + "] but got: [" + ref.utf8ToString() + "] this translog file belongs to a different translog");
BytesRef uuidBytes = new BytesRef(translogUUID);
if (uuidBytes.bytesEquals(ref) == false) {
throw new TranslogCorruptedException("expected shard UUID [" + uuidBytes + "] but got: [" + ref + "] this translog file belongs to a different translog");
}
return new ImmutableTranslogReader(channelReference.getGeneration(), channelReference, ref.length + CodecUtil.headerLength(TranslogWriter.TRANSLOG_CODEC) + RamUsageEstimator.NUM_BYTES_INT, checkpoint.offset, checkpoint.numOps);
default:

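The hunk above swaps a String comparison for a byte-for-byte one: the expected UUID is encoded once as a BytesRef and compared against the raw header bytes, so a corrupt header never has to survive UTF-8 decoding first. A hedged sketch of that check (BytesRef and bytesEquals are Lucene utilities; the method itself is illustrative):

import org.apache.lucene.util.BytesRef;

// Illustrative: compare the expected translog UUID against raw header bytes.
static boolean uuidMatches(String expectedUUID, BytesRef headerBytes) {
    BytesRef expected = new BytesRef(expectedUUID); // UTF-8 bytes of the expected UUID
    return expected.bytesEquals(headerBytes);       // byte-wise, no String decode
}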
View File

@ -132,12 +132,12 @@ public class NodeIndicesStats implements Streamable, Serializable, ToXContent {
}
@Nullable
public QueryCacheStats getFilterCache() {
public QueryCacheStats getQueryCache() {
return stats.getQueryCache();
}
@Nullable
public RequestCacheStats getQueryCache() {
public RequestCacheStats getRequestCache() {
return stats.getRequestCache();
}

View File

@ -389,12 +389,14 @@ public class RecoveryState implements ToXContent, Streamable {
public static class Timer implements Streamable {
protected long startTime = 0;
protected long startNanoTime = 0;
protected long time = -1;
protected long stopTime = 0;
public synchronized void start() {
assert startTime == 0 : "already started";
startTime = TimeValue.nsecToMSec(System.nanoTime());
startTime = System.currentTimeMillis();
startNanoTime = System.nanoTime();
}
/** Returns start time in millis */
@ -404,13 +406,13 @@ public class RecoveryState implements ToXContent, Streamable {
/** Returns elapsed time in millis, or 0 if timer was not started */
public synchronized long time() {
if (startTime == 0) {
if (startNanoTime == 0) {
return 0;
}
if (time >= 0) {
return time;
}
return Math.max(0, TimeValue.nsecToMSec(System.nanoTime()) - startTime);
return Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startNanoTime));
}
/** Returns stop time in millis */
@ -420,13 +422,14 @@ public class RecoveryState implements ToXContent, Streamable {
public synchronized void stop() {
assert stopTime == 0 : "already stopped";
stopTime = Math.max(TimeValue.nsecToMSec(System.nanoTime()), startTime);
time = stopTime - startTime;
stopTime = Math.max(System.currentTimeMillis(), startTime);
time = TimeValue.nsecToMSec(System.nanoTime() - startNanoTime);
assert time >= 0;
}
public synchronized void reset() {
startTime = 0;
startNanoTime = 0;
time = -1;
stopTime = 0;
}
@ -435,6 +438,7 @@ public class RecoveryState implements ToXContent, Streamable {
@Override
public synchronized void readFrom(StreamInput in) throws IOException {
startTime = in.readVLong();
startNanoTime = in.readVLong();
stopTime = in.readVLong();
time = in.readVLong();
}
@ -442,6 +446,7 @@ public class RecoveryState implements ToXContent, Streamable {
@Override
public synchronized void writeTo(StreamOutput out) throws IOException {
out.writeVLong(startTime);
out.writeVLong(startNanoTime);
out.writeVLong(stopTime);
// write a snapshot of current time, which is not per se the time field
out.writeVLong(time());
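The Timer change above is the usual split between wall-clock and monotonic time: System.currentTimeMillis() supplies the reported start/stop timestamps, while a System.nanoTime() delta measures the elapsed duration. A minimal sketch of the pattern (field names mirror the diff; the class wrapper is illustrative):

import java.util.concurrent.TimeUnit;

// Illustrative reduction of the Timer logic above.
class ElapsedTimer {
    long startTime;     // wall clock, for reporting
    long startNanoTime; // monotonic, for measuring
    long time = -1;     // elapsed millis once stopped

    synchronized void start() {
        startTime = System.currentTimeMillis();
        startNanoTime = System.nanoTime();
    }

    synchronized void stop() {
        // nanoTime deltas cannot jump backwards the way currentTimeMillis can
        // when the system clock is adjusted, so the elapsed value stays >= 0
        time = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanoTime);
    }
}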

View File

@ -61,7 +61,6 @@ public class PluginsService extends AbstractComponent {
public static final String ES_PLUGIN_PROPERTIES = "es-plugin.properties";
public static final String LOAD_PLUGIN_FROM_CLASSPATH = "plugins.load_classpath_plugins";
static final String PLUGIN_LIB_PATTERN = "glob:**.{jar,zip}";
public static final String PLUGINS_CHECK_LUCENE_KEY = "plugins.check_lucene";
public static final String PLUGINS_INFO_REFRESH_INTERVAL_KEY = "plugins.info_refresh_interval";
@ -118,11 +117,6 @@ public class PluginsService extends AbstractComponent {
}
// now, find all the ones that are in the classpath
try {
loadPluginsIntoClassLoader();
} catch (IOException ex) {
throw new IllegalStateException("Can't load plugins into classloader", ex);
}
if (loadClasspathPlugins) {
tupleBuilder.addAll(loadPluginsFromClasspath(settings));
}
@ -349,71 +343,7 @@ public class PluginsService extends AbstractComponent {
return cachedPluginsInfo;
}
private void loadPluginsIntoClassLoader() throws IOException {
Path pluginsDirectory = environment.pluginsFile();
if (!isAccessibleDirectory(pluginsDirectory, logger)) {
return;
}
ClassLoader classLoader = settings.getClassLoader();
Class classLoaderClass = classLoader.getClass();
Method addURL = null;
while (!classLoaderClass.equals(Object.class)) {
try {
addURL = classLoaderClass.getDeclaredMethod("addURL", URL.class);
addURL.setAccessible(true);
break;
} catch (NoSuchMethodException e) {
// no method, try the parent
classLoaderClass = classLoaderClass.getSuperclass();
}
}
if (addURL == null) {
logger.debug("failed to find addURL method on classLoader [" + classLoader + "] to add methods");
return;
}
try (DirectoryStream<Path> stream = Files.newDirectoryStream(pluginsDirectory)) {
for (Path plugin : stream) {
// We check that subdirs are directories and readable
if (!isAccessibleDirectory(plugin, logger)) {
continue;
}
logger.trace("--- adding plugin [{}]", plugin.toAbsolutePath());
try {
// add the root
addURL.invoke(classLoader, plugin.toUri().toURL());
// gather files to add
List<Path> libFiles = Lists.newArrayList();
libFiles.addAll(Arrays.asList(files(plugin)));
Path libLocation = plugin.resolve("lib");
if (Files.isDirectory(libLocation)) {
libFiles.addAll(Arrays.asList(files(libLocation)));
}
PathMatcher matcher = PathUtils.getDefaultFileSystem().getPathMatcher(PLUGIN_LIB_PATTERN);
// if there are jars in it, add it as well
for (Path libFile : libFiles) {
if (!matcher.matches(libFile)) {
continue;
}
addURL.invoke(classLoader, libFile.toUri().toURL());
}
} catch (Throwable e) {
logger.warn("failed to add plugin [" + plugin + "]", e);
}
}
}
}
private Path[] files(Path from) throws IOException {
try (DirectoryStream<Path> stream = Files.newDirectoryStream(from)) {
return Iterators.toArray(stream.iterator(), Path.class);
}
}
private List<Tuple<PluginInfo,Plugin>> loadPluginsFromClasspath(Settings settings) {
ImmutableList.Builder<Tuple<PluginInfo, Plugin>> plugins = ImmutableList.builder();

View File

@ -269,11 +269,11 @@ public class RestNodesAction extends AbstractCatAction {
table.addCell(fdStats == null ? null : fdStats.getMemorySize());
table.addCell(fdStats == null ? null : fdStats.getEvictions());
QueryCacheStats fcStats = indicesStats == null ? null : indicesStats.getFilterCache();
QueryCacheStats fcStats = indicesStats == null ? null : indicesStats.getQueryCache();
table.addCell(fcStats == null ? null : fcStats.getMemorySize());
table.addCell(fcStats == null ? null : fcStats.getEvictions());
RequestCacheStats qcStats = indicesStats == null ? null : indicesStats.getQueryCache();
RequestCacheStats qcStats = indicesStats == null ? null : indicesStats.getRequestCache();
table.addCell(qcStats == null ? null : qcStats.getMemorySize());
table.addCell(qcStats == null ? null : qcStats.getEvictions());
table.addCell(qcStats == null ? null : qcStats.getHitCount());

View File

@ -27,6 +27,7 @@ import com.google.common.collect.ImmutableMap;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.TopDocs;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
@ -371,16 +372,18 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
public QuerySearchResult executeQueryPhase(QuerySearchRequest request) {
final SearchContext context = findContext(request.id());
contextProcessing(context);
IndexShard indexShard = context.indexShard();
try {
final IndexCache indexCache = context.indexShard().indexService().cache();
final IndexCache indexCache = indexShard.indexService().cache();
final QueryCachingPolicy cachingPolicy = indexShard.getQueryCachingPolicy();
context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity(),
indexCache.query(), indexCache.queryPolicy()));
indexCache.query(), cachingPolicy));
} catch (Throwable e) {
processFailure(context, e);
cleanContext(context);
throw new QueryPhaseExecutionException(context, "Failed to set aggregated df", e);
}
ShardSearchStats shardSearchStats = context.indexShard().searchService();
ShardSearchStats shardSearchStats = indexShard.searchService();
try {
shardSearchStats.onPreQueryPhase(context);
long time = System.nanoTime();
@ -446,9 +449,11 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
final SearchContext context = findContext(request.id());
contextProcessing(context);
try {
final IndexCache indexCache = context.indexShard().indexService().cache();
final IndexShard indexShard = context.indexShard();
final IndexCache indexCache = indexShard.indexService().cache();
final QueryCachingPolicy cachingPolicy = indexShard.getQueryCachingPolicy();
context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity(),
indexCache.query(), indexCache.queryPolicy()));
indexCache.query(), cachingPolicy));
} catch (Throwable e) {
freeContext(context.id());
cleanContext(context);

View File

@ -57,7 +57,7 @@ public final class PipelineAggregatorBuilders {
return new MovAvgBuilder(name);
}
public static final BucketScriptBuilder seriesArithmetic(String name) {
public static final BucketScriptBuilder bucketScript(String name) {
return new BucketScriptBuilder(name);
}
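A short usage sketch of the renamed builder, mirroring the BucketScriptTests changes later in this diff (histogram, sum, Script and ScriptType are used there in exactly this shape; the aggregation names here are illustrative):

import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService.ScriptType;
import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.bucketScript;

// Illustrative: the builder formerly obtained via seriesArithmetic(...).
histogram("histo").field("timestamp").interval(50)
        .subAggregation(sum("twoSum").field("two"))
        .subAggregation(bucketScript("totalSum")
                .setBucketsPaths("twoSum")
                .script(new Script("_value0", ScriptType.INLINE, null, null)));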

View File

@ -19,6 +19,7 @@
package org.elasticsearch.transport.netty;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.Version;
import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.compress.Compressor;
@ -98,63 +99,91 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler {
byte status = buffer.readByte();
Version version = Version.fromId(buffer.readInt());
StreamInput wrappedStream;
if (TransportStatus.isCompress(status) && hasMessageBytesToRead && buffer.readable()) {
Compressor compressor;
try {
compressor = CompressorFactory.compressor(buffer);
} catch (NotCompressedException ex) {
int maxToRead = Math.min(buffer.readableBytes(), 10);
int offset = buffer.readerIndex();
StringBuilder sb = new StringBuilder("stream marked as compressed, but no compressor found, first [").append(maxToRead).append("] content bytes out of [").append(buffer.readableBytes()).append("] readable bytes with message size [").append(size).append("] ").append("] are [");
for (int i = 0; i < maxToRead; i++) {
sb.append(buffer.getByte(offset + i)).append(",");
StreamInput wrappedStream = null;
try {
if (TransportStatus.isCompress(status) && hasMessageBytesToRead && buffer.readable()) {
Compressor compressor;
try {
compressor = CompressorFactory.compressor(buffer);
} catch (NotCompressedException ex) {
int maxToRead = Math.min(buffer.readableBytes(), 10);
int offset = buffer.readerIndex();
StringBuilder sb = new StringBuilder("stream marked as compressed, but no compressor found, first [").append(maxToRead).append("] content bytes out of [").append(buffer.readableBytes()).append("] readable bytes with message size [").append(size).append("] ").append("] are [");
for (int i = 0; i < maxToRead; i++) {
sb.append(buffer.getByte(offset + i)).append(",");
}
sb.append("]");
throw new IllegalStateException(sb.toString());
}
sb.append("]");
throw new IllegalStateException(sb.toString());
wrappedStream = compressor.streamInput(streamIn);
} else {
wrappedStream = streamIn;
}
wrappedStream = compressor.streamInput(streamIn);
} else {
wrappedStream = streamIn;
}
wrappedStream = new FilterStreamInput(wrappedStream, namedWriteableRegistry);
wrappedStream.setVersion(version);
wrappedStream = new FilterStreamInput(wrappedStream, namedWriteableRegistry);
wrappedStream.setVersion(version);
if (TransportStatus.isRequest(status)) {
String action = handleRequest(ctx.getChannel(), wrappedStream, requestId, version);
if (buffer.readerIndex() != expectedIndexReader) {
if (buffer.readerIndex() < expectedIndexReader) {
logger.warn("Message not fully read (request) for requestId [{}], action [{}], readerIndex [{}] vs expected [{}]; resetting",
requestId, action, buffer.readerIndex(), expectedIndexReader);
} else {
logger.warn("Message read past expected size (request) for requestId=[{}], action [{}], readerIndex [{}] vs expected [{}]; resetting",
requestId, action, buffer.readerIndex(), expectedIndexReader);
}
buffer.readerIndex(expectedIndexReader);
}
} else {
TransportResponseHandler handler = transportServiceAdapter.onResponseReceived(requestId);
// ignore if it's null, the adapter logs it
if (handler != null) {
if (TransportStatus.isError(status)) {
handlerResponseError(wrappedStream, handler);
} else {
handleResponse(ctx.getChannel(), wrappedStream, handler);
if (TransportStatus.isRequest(status)) {
String action = handleRequest(ctx.getChannel(), wrappedStream, requestId, version);
boolean success = false;
try {
final int nextByte = wrappedStream.read();
// calling read() is useful to make sure the message is fully read, even if there is an EOS marker
if (nextByte != -1) {
throw new IllegalStateException("Message not fully read (request) for requestId [" + requestId + "], action ["
+ action + "], readerIndex [" + buffer.readerIndex() + "] vs expected [" + expectedIndexReader + "]; resetting");
}
if (buffer.readerIndex() < expectedIndexReader) {
throw new IllegalStateException("Message is fully read (request), yet there are " + (expectedIndexReader - buffer.readerIndex()) + " remaining bytes; resetting");
}
if (buffer.readerIndex() > expectedIndexReader) {
throw new IllegalStateException("Message read past expected size (request) for requestId [" + requestId + "], action ["
+ action + "], readerIndex [" + buffer.readerIndex() + "] vs expected [" + expectedIndexReader + "]; resetting");
}
success = true;
} finally {
if (!success) {
buffer.readerIndex(expectedIndexReader);
}
}
} else {
// if it's null, skip those bytes
buffer.readerIndex(markedReaderIndex + size);
}
if (buffer.readerIndex() != expectedIndexReader) {
if (buffer.readerIndex() < expectedIndexReader) {
logger.warn("Message not fully read (response) for [{}] handler {}, error [{}], resetting", requestId, handler, TransportStatus.isError(status));
TransportResponseHandler handler = transportServiceAdapter.onResponseReceived(requestId);
// ignore if it's null, the adapter logs it
if (handler != null) {
if (TransportStatus.isError(status)) {
handlerResponseError(wrappedStream, handler);
} else {
handleResponse(ctx.getChannel(), wrappedStream, handler);
}
} else {
logger.warn("Message read past expected size (response) for [{}] handler {}, error [{}], resetting", requestId, handler, TransportStatus.isError(status));
// if it's null, skip those bytes
buffer.readerIndex(markedReaderIndex + size);
}
boolean success = false;
try {
final int nextByte = wrappedStream.read();
// calling read() is useful to make sure the message is fully read, even if there is an EOS marker
if (nextByte != -1) {
throw new IllegalStateException("Message not fully read (response) for requestId [" + requestId + "], handler ["
+ handler + "], error [" + TransportStatus.isError(status) + "]; resetting");
}
if (buffer.readerIndex() < expectedIndexReader) {
throw new IllegalStateException("Message is fully read (response), yet there are " + (expectedIndexReader - buffer.readerIndex()) + " remaining bytes; resetting");
}
if (buffer.readerIndex() > expectedIndexReader) {
throw new IllegalStateException("Message read past expected size (response) for requestId [" + requestId + "], handler ["
+ handler + "], error [" + TransportStatus.isError(status) + "]; resetting");
}
success = true;
} finally {
if (!success) {
buffer.readerIndex(expectedIndexReader);
}
}
buffer.readerIndex(expectedIndexReader);
}
} finally {
IOUtils.close(wrappedStream);
}
wrappedStream.close();
}
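Both branches above now share one guard: after dispatching, read a single extra byte to prove the payload was fully consumed, compare reader positions, and reset the reader index if anything went wrong. A stripped-down sketch of that guard over a plain InputStream (names are illustrative; the real code operates on the wrapped StreamInput and the Netty buffer):

import java.io.IOException;
import java.io.InputStream;

// Illustrative: verify a message body was consumed exactly.
static void ensureFullyRead(InputStream in, int readerIndex, int expectedIndex) throws IOException {
    // read() is checked as well as the indices because an EOS marker can leave
    // buffer bytes behind that the logical stream will never expose
    if (in.read() != -1) {
        throw new IllegalStateException("message not fully read");
    }
    if (readerIndex < expectedIndex) {
        throw new IllegalStateException((expectedIndex - readerIndex) + " bytes remain after a fully read stream");
    }
    if (readerIndex > expectedIndex) {
        throw new IllegalStateException("read past expected message size");
    }
}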
protected void handleResponse(Channel channel, StreamInput buffer, final TransportResponseHandler handler) {

View File

@ -25,7 +25,26 @@
//// These permissions apply to the JDK itself:
grant codeBase "file:${{java.ext.dirs}}/*" {
permission java.security.AllPermission;
permission java.security.AllPermission;
};
//// Very special jar permissions:
//// These are dangerous permissions that we don't want to grant to everything.
grant codeBase "${es.security.jar.lucene.core}" {
// needed to allow MMapDirectory's "unmap hack"
permission java.lang.RuntimePermission "accessClassInPackage.sun.misc";
};
grant codeBase "${es.security.jar.twitter.jsr166e}" {
// needed for LongAdder etc
// TODO: remove this in java 8!
permission java.lang.RuntimePermission "accessClassInPackage.sun.misc";
};
grant codeBase "${es.security.jar.elasticsearch.securemock}" {
// needed to support creation of mocks
permission java.lang.RuntimePermission "reflectionFactoryAccess";
};
//// Everything else:
@ -67,15 +86,13 @@ grant {
permission java.lang.RuntimePermission "getProtectionDomain";
// reflection hacks:
// needed for Striped64 (what is this doing), also enables unmap hack
permission java.lang.RuntimePermission "accessClassInPackage.sun.misc";
// needed for mock filesystems in tests (to capture implCloseChannel)
permission java.lang.RuntimePermission "accessClassInPackage.sun.nio.ch";
// needed by groovy engine
permission java.lang.RuntimePermission "accessClassInPackage.sun.reflect";
// needed by aws core sdk (TODO: look into this)
permission java.lang.RuntimePermission "accessClassInPackage.sun.security.ssl";
// needed by RandomizedRunner
permission java.lang.RuntimePermission "accessDeclaredMembers";
// needed by RandomizedRunner
@ -99,9 +116,6 @@ grant {
// needed by JDKESLoggerTests
permission java.util.logging.LoggingPermission "control";
// needed by Mockito
permission java.lang.RuntimePermission "reflectionFactoryAccess";
// needed to install SSLFactories, advanced SSL configuration, etc.
permission java.lang.RuntimePermission "setFactory";
};
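As far as this diff shows, the `${es.security.jar.*}` placeholders in the grants above are filled in from system properties before the policy is loaded (BootstrapForTesting below calls Security.setCodebaseProperties() for that). A hedged sketch of the mechanism, assuming each property simply holds the owning jar's URL:

// Illustrative: point a codebase placeholder at the jar that contains a class.
java.net.URL luceneCore = org.apache.lucene.store.MMapDirectory.class
        .getProtectionDomain().getCodeSource().getLocation();
System.setProperty("es.security.jar.lucene.core", luceneCore.toString());
// The default Policy implementation expands ${...} system properties
// when it parses the policy file.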

View File

@ -19,7 +19,7 @@
package org.elasticsearch.action.bulk;
import com.carrotsearch.ant.tasks.junit4.dependencies.com.carrotsearch.randomizedtesting.generators.RandomPicks;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import org.elasticsearch.action.get.MultiGetItemResponse;
import org.elasticsearch.action.get.MultiGetRequestBuilder;

View File

@ -50,6 +50,13 @@ public class BootstrapForTesting {
static {
// just like bootstrap, initialize natives, then SM
Bootstrap.initializeNatives(true, true, true);
// check for jar hell
try {
JarHell.checkJarHell();
} catch (Exception e) {
throw new RuntimeException("found jar hell in test classpath", e);
}
// make sure java.io.tmpdir exists always (in case code uses it in a static initializer)
Path javaTmpDir = PathUtils.get(Objects.requireNonNull(System.getProperty("java.io.tmpdir"),
@ -63,6 +70,7 @@ public class BootstrapForTesting {
// install security manager if requested
if (systemPropertyAsBoolean("tests.security.manager", false)) {
try {
Security.setCodebaseProperties();
// initialize paths the same exact way as bootstrap.
Permissions perms = new Permissions();
Path basedir = PathUtils.get(Objects.requireNonNull(System.getProperty("project.basedir"),

View File

@ -0,0 +1,47 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.bootstrap;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.test.ElasticsearchTestCase;
import org.junit.Test;
import java.nio.file.Path;
import java.nio.file.PathMatcher;
public class BootstrapTests extends ElasticsearchTestCase {
@Test
public void testHasLibExtension() {
PathMatcher matcher = PathUtils.getDefaultFileSystem().getPathMatcher(Bootstrap.PLUGIN_LIB_PATTERN);
Path p = PathUtils.get("path", "to", "plugin.jar");
assertTrue(matcher.matches(p));
p = PathUtils.get("path", "to", "plugin.zip");
assertTrue(matcher.matches(p));
p = PathUtils.get("path", "to", "plugin.tar.gz");
assertFalse(matcher.matches(p));
p = PathUtils.get("path", "to", "plugin");
assertFalse(matcher.matches(p));
}
}

View File

@ -179,4 +179,29 @@ public class SecurityTests extends ElasticsearchTestCase {
fail("didn't get expected exception");
} catch (IOException expected) {}
}
/** We only grant this to special jars */
public void testUnsafeAccess() throws Exception {
assumeTrue("test requires security manager", System.getSecurityManager() != null);
try {
// class could be legitimately loaded, so we might not fail until setAccessible
Class.forName("sun.misc.Unsafe")
.getDeclaredField("theUnsafe")
.setAccessible(true);
fail("didn't get expected exception");
} catch (SecurityException expected) {
// ok
} catch (Exception somethingElse) {
assumeNoException("perhaps JVM doesn't have Unsafe?", somethingElse);
}
}
/** can't execute processes */
public void testProcessExecution() throws Exception {
assumeTrue("test requires security manager", System.getSecurityManager() != null);
try {
Runtime.getRuntime().exec("ls");
fail("didn't get expected exception");
} catch (SecurityException expected) {}
}
}

View File

@ -19,10 +19,12 @@
package org.elasticsearch.client.node;
import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.ImmutableSet;
import com.google.common.collect.ImmutableSet;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.GenericAction;
import org.elasticsearch.action.support.ActionFilter;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.TransportAction;
import org.elasticsearch.client.AbstractClientHeadersTests;
@ -31,6 +33,7 @@ import org.elasticsearch.client.support.Headers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.Collections;
import java.util.HashMap;
/**
@ -38,7 +41,7 @@ import java.util.HashMap;
*/
public class NodeClientHeadersTests extends AbstractClientHeadersTests {
private static final ActionFilters EMPTY_FILTERS = new ActionFilters(ImmutableSet.of());
private static final ActionFilters EMPTY_FILTERS = new ActionFilters(Collections.<ActionFilter>emptySet());
@Override
protected Client buildClient(Settings headersSettings, GenericAction[] testedActions) {

View File

@ -19,7 +19,7 @@
package org.elasticsearch.common.collect;
import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableMap;
import org.elasticsearch.test.ElasticsearchTestCase;
import java.util.HashMap;

View File

@ -19,8 +19,8 @@
package org.elasticsearch.common.lucene.index;
import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.Lists;
import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.Maps;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.apache.lucene.analysis.core.KeywordAnalyzer;

View File

@ -18,10 +18,11 @@
*/
package org.elasticsearch.index.store;
import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.Lists;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import com.google.common.base.Charsets;
import com.google.common.base.Predicate;
import com.google.common.collect.Lists;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.index.CheckIndex;
import org.apache.lucene.index.IndexFileNames;

View File

@ -19,7 +19,7 @@
package org.elasticsearch.index.store;
import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.Lists;
import com.google.common.collect.Lists;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;

View File

@ -154,7 +154,7 @@ public class RecoveryStateTest extends ElasticsearchTestCase {
if (randomBoolean()) {
timer.stop();
assertThat(timer.stopTime(), greaterThanOrEqualTo(timer.startTime()));
assertThat(timer.time(), equalTo(timer.stopTime() - timer.startTime()));
assertThat(timer.time(), greaterThan(0l));
lastRead = streamer.serializeDeserialize();
assertThat(lastRead.startTime(), equalTo(timer.startTime()));
assertThat(lastRead.time(), equalTo(timer.time()));
@ -286,8 +286,7 @@ public class RecoveryStateTest extends ElasticsearchTestCase {
if (completeRecovery) {
assertThat(filesToRecover.size(), equalTo(0));
index.stop();
assertThat(index.time(), equalTo(index.stopTime() - index.startTime()));
assertThat(index.time(), equalTo(index.stopTime() - index.startTime()));
assertThat(index.time(), greaterThanOrEqualTo(0l));
}
logger.info("testing serialized information");

View File

@ -41,7 +41,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.cache.query.QueryCacheModule;
import org.elasticsearch.index.cache.query.QueryCacheStats;
import org.elasticsearch.index.cache.query.QueryCacheModule.FilterCacheSettings;
import org.elasticsearch.index.cache.query.QueryCacheModule.QueryCacheSettings;
import org.elasticsearch.index.cache.query.index.IndexQueryCache;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.shard.MergePolicyConfig;
@ -79,8 +79,8 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
//Filter/Query cache is cleaned periodically, default is 60s, so make sure it runs often. Thread.sleep for 60s is bad
return Settings.settingsBuilder().put(super.nodeSettings(nodeOrdinal))
.put(IndicesRequestCache.INDICES_CACHE_REQUEST_CLEAN_INTERVAL, "1ms")
.put(FilterCacheSettings.FILTER_CACHE_EVERYTHING, true)
.put(QueryCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, IndexQueryCache.class)
.put(QueryCacheSettings.QUERY_CACHE_EVERYTHING, true)
.put(QueryCacheModule.QueryCacheSettings.QUERY_CACHE_TYPE, IndexQueryCache.class)
.build();
}
@ -143,7 +143,7 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true)
.execute().actionGet();
assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0l));
assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
assertThat(nodesStats.getNodes()[0].getIndices().getQueryCache().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getQueryCache().getMemorySizeInBytes(), equalTo(0l));
IndicesStatsResponse indicesStats = client().admin().indices().prepareStats("test")
.clear().setFieldData(true).setQueryCache(true)
@ -164,7 +164,7 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true)
.execute().actionGet();
assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFilterCache().getMemorySizeInBytes(), greaterThan(0l));
assertThat(nodesStats.getNodes()[0].getIndices().getQueryCache().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getQueryCache().getMemorySizeInBytes(), greaterThan(0l));
indicesStats = client().admin().indices().prepareStats("test")
.clear().setFieldData(true).setQueryCache(true)
@ -177,7 +177,7 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true)
.execute().actionGet();
assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0l));
assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
assertThat(nodesStats.getNodes()[0].getIndices().getQueryCache().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getQueryCache().getMemorySizeInBytes(), equalTo(0l));
indicesStats = client().admin().indices().prepareStats("test")
.clear().setFieldData(true).setQueryCache(true)
@ -970,7 +970,7 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
assertEquals(stats1.getTotalCount(), stats2.getTotalCount());
}
private void assertCumulativeFilterCacheStats(IndicesStatsResponse response) {
private void assertCumulativeQueryCacheStats(IndicesStatsResponse response) {
assertAllSuccessful(response);
QueryCacheStats total = response.getTotal().queryCache;
QueryCacheStats indexTotal = new QueryCacheStats();
@ -993,13 +993,13 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
ensureGreen();
IndicesStatsResponse response = client().admin().indices().prepareStats("index").setQueryCache(true).get();
assertCumulativeFilterCacheStats(response);
assertCumulativeQueryCacheStats(response);
assertEquals(0, response.getTotal().queryCache.getCacheSize());
SearchResponse r;
assertSearchResponse(r = client().prepareSearch("index").setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.matchQuery("foo", "baz"))).get());
response = client().admin().indices().prepareStats("index").setQueryCache(true).get();
assertCumulativeFilterCacheStats(response);
assertCumulativeQueryCacheStats(response);
assertThat(response.getTotal().queryCache.getHitCount(), equalTo(0L));
assertThat(response.getTotal().queryCache.getEvictions(), equalTo(0L));
assertThat(response.getTotal().queryCache.getMissCount(), greaterThan(0L));
@ -1007,7 +1007,7 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
assertSearchResponse(client().prepareSearch("index").setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.matchQuery("foo", "baz"))).get());
response = client().admin().indices().prepareStats("index").setQueryCache(true).get();
assertCumulativeFilterCacheStats(response);
assertCumulativeQueryCacheStats(response);
assertThat(response.getTotal().queryCache.getHitCount(), greaterThan(0L));
assertThat(response.getTotal().queryCache.getEvictions(), equalTo(0L));
assertThat(response.getTotal().queryCache.getMissCount(), greaterThan(0L));
@ -1017,7 +1017,7 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
assertTrue(client().prepareDelete("index", "type", "2").get().isFound());
refresh();
response = client().admin().indices().prepareStats("index").setQueryCache(true).get();
assertCumulativeFilterCacheStats(response);
assertCumulativeQueryCacheStats(response);
assertThat(response.getTotal().queryCache.getHitCount(), greaterThan(0L));
assertThat(response.getTotal().queryCache.getEvictions(), greaterThan(0L));
assertThat(response.getTotal().queryCache.getCacheSize(), equalTo(0L));
@ -1029,7 +1029,7 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
assertSearchResponse(client().prepareSearch("index").setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.matchQuery("foo", "baz"))).get());
response = client().admin().indices().prepareStats("index").setQueryCache(true).get();
assertCumulativeFilterCacheStats(response);
assertCumulativeQueryCacheStats(response);
assertThat(response.getTotal().queryCache.getHitCount(), greaterThan(0L));
assertThat(response.getTotal().queryCache.getEvictions(), greaterThan(0L));
assertThat(response.getTotal().queryCache.getMissCount(), greaterThan(0L));
@ -1038,7 +1038,7 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
assertAllSuccessful(client().admin().indices().prepareClearCache("index").setQueryCache(true).get());
response = client().admin().indices().prepareStats("index").setQueryCache(true).get();
assertCumulativeFilterCacheStats(response);
assertCumulativeQueryCacheStats(response);
assertThat(response.getTotal().queryCache.getHitCount(), greaterThan(0L));
assertThat(response.getTotal().queryCache.getEvictions(), greaterThan(0L));
assertThat(response.getTotal().queryCache.getMissCount(), greaterThan(0L));

View File

@ -1,85 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugins;
import com.google.common.collect.Lists;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
import org.junit.Test;
import java.net.URISyntaxException;
import java.util.Collections;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
/**
*
*/
@ClusterScope(scope= ElasticsearchIntegrationTest.Scope.TEST, numDataNodes=0, transportClientRatio = 0)
public class PluginLuceneCheckerTests extends PluginTestCase {
/**
* We check that no Lucene version checking is done
* when we set `"plugins.check_lucene":false`
*/
@Test
public void testDisableLuceneVersionCheckingPlugin() throws URISyntaxException {
String serverNodeId = startNodeWithPlugins(
settingsBuilder().put(PluginsService.PLUGINS_CHECK_LUCENE_KEY, false)
.put(PluginsService.ES_PLUGIN_PROPERTIES_FILE_KEY, "es-plugin-test.properties")
.put(PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true).build(),
"/org/elasticsearch/plugins/lucene/");
logger.info("--> server {} started" + serverNodeId);
NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().clear().setPlugins(true).execute().actionGet();
logger.info("--> full json answer, status " + response.toString());
ElasticsearchAssertions.assertNodeContainsPlugins(response, serverNodeId,
Lists.newArrayList("old-lucene"), Lists.newArrayList("old"), Lists.newArrayList("1.0.0"), // JVM Plugin
Collections.EMPTY_LIST, Collections.EMPTY_LIST, Collections.EMPTY_LIST);// No Site Plugin
}
/**
* We check that with an old plugin (built on an old Lucene version)
* plugin is not loaded
* We check that with a recent plugin (built on current Lucene version)
* plugin is loaded
* We check that with a too recent plugin (built on an unknown Lucene version)
* plugin is not loaded
*/
@Test
public void testEnableLuceneVersionCheckingPlugin() throws URISyntaxException {
String serverNodeId = startNodeWithPlugins(
settingsBuilder().put(PluginsService.PLUGINS_CHECK_LUCENE_KEY, true)
.put(PluginsService.ES_PLUGIN_PROPERTIES_FILE_KEY, "es-plugin-test.properties")
.put(PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true).build(),
"/org/elasticsearch/plugins/lucene/");
logger.info("--> server {} started" + serverNodeId);
NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().clear().setPlugins(true).execute().actionGet();
logger.info("--> full json answer, status " + response.toString());
ElasticsearchAssertions.assertNodeContainsPlugins(response, serverNodeId,
Lists.newArrayList("current-lucene"), Lists.newArrayList("current"), Lists.newArrayList("2.0.0"), // JVM Plugin
Collections.EMPTY_LIST, Collections.EMPTY_LIST, Collections.EMPTY_LIST);// No Site Plugin
}
}

View File

@ -1,123 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugins;
import com.google.common.collect.ImmutableList;
import org.elasticsearch.action.admin.cluster.node.info.PluginInfo;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.loading.classpath.InClassPathPlugin;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
import org.junit.Test;
import java.net.URISyntaxException;
import java.nio.file.Path;
import java.nio.file.PathMatcher;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.hamcrest.Matchers.endsWith;
import static org.hamcrest.Matchers.instanceOf;
@ClusterScope(scope= ElasticsearchIntegrationTest.Scope.TEST, numDataNodes=0, numClientNodes = 1, transportClientRatio = 0)
public class PluginServiceTests extends PluginTestCase {
@Test
public void testPluginLoadingFromClassName() throws URISyntaxException {
Settings settings = settingsBuilder()
// Defines a plugin in classpath
.put(PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true)
.put(PluginsService.ES_PLUGIN_PROPERTIES_FILE_KEY, "es-plugin-test.properties")
// Defines a plugin in settings
.put("plugin.types", InSettingsPlugin.class.getName())
.build();
startNodeWithPlugins(settings, "/org/elasticsearch/plugins/loading/");
Plugin plugin = getPlugin("in-settings-plugin");
assertNotNull("InSettingsPlugin (defined below in this class) must be loaded", plugin);
assertThat(plugin, instanceOf(InSettingsPlugin.class));
plugin = getPlugin("in-classpath-plugin");
assertNotNull("InClassPathPlugin (defined in package ) must be loaded", plugin);
assertThat(plugin, instanceOf(InClassPathPlugin.class));
plugin = getPlugin("in-jar-plugin");
assertNotNull("InJarPlugin (packaged as a JAR file in a plugins directory) must be loaded", plugin);
assertThat(plugin.getClass().getName(), endsWith("InJarPlugin"));
plugin = getPlugin("in-zip-plugin");
assertNotNull("InZipPlugin (packaged as a Zipped file in a plugins directory) must be loaded", plugin);
assertThat(plugin.getClass().getName(), endsWith("InZipPlugin"));
}
@Test
public void testHasLibExtension() {
PathMatcher matcher = PathUtils.getDefaultFileSystem().getPathMatcher(PluginsService.PLUGIN_LIB_PATTERN);
Path p = PathUtils.get("path", "to", "plugin.jar");
assertTrue(matcher.matches(p));
p = PathUtils.get("path", "to", "plugin.zip");
assertTrue(matcher.matches(p));
p = PathUtils.get("path", "to", "plugin.tar.gz");
assertFalse(matcher.matches(p));
p = PathUtils.get("path", "to", "plugin");
assertFalse(matcher.matches(p));
}
private Plugin getPlugin(String pluginName) {
assertNotNull("cannot check plugin existence with a null plugin's name", pluginName);
PluginsService pluginsService = internalCluster().getInstance(PluginsService.class);
ImmutableList<Tuple<PluginInfo, Plugin>> plugins = pluginsService.plugins();
if ((plugins != null) && (!plugins.isEmpty())) {
for (Tuple<PluginInfo, Plugin> plugin:plugins) {
if (pluginName.equals(plugin.v1().getName())) {
return plugin.v2();
}
}
}
return null;
}
static class InSettingsPlugin extends AbstractPlugin {
private final Settings settings;
public InSettingsPlugin(Settings settings) {
this.settings = settings;
}
@Override
public String name() {
return "in-settings-plugin";
}
@Override
public String description() {
return "A plugin defined in settings";
}
}
}

View File

@ -53,7 +53,7 @@ import java.util.Map;
import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.seriesArithmetic;
import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.bucketScript;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
@ -533,7 +533,7 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest {
.subAggregation(sum("threeSum").field("three"))
.subAggregation(sum("fourSum").field("four"))
.subAggregation(
seriesArithmetic("totalSum").setBucketsPaths("twoSum", "threeSum", "fourSum").script(
bucketScript("totalSum").setBucketsPaths("twoSum", "threeSum", "fourSum").script(
new Script("_value0 + _value1 + _value2", ScriptType.INLINE, ExpressionScriptEngineService.NAME, null)))).execute().actionGet();
InternalHistogram<Bucket> histogram = response.getAggregations().get("histogram");

View File

@ -42,7 +42,7 @@ import java.util.Map;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.seriesArithmetic;
import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.bucketScript;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;
@ -105,7 +105,7 @@ public class BucketScriptTests extends ElasticsearchIntegrationTest {
.subAggregation(sum("field3Sum").field(FIELD_3_NAME))
.subAggregation(sum("field4Sum").field(FIELD_4_NAME))
.subAggregation(
seriesArithmetic("seriesArithmetic").setBucketsPaths("field2Sum", "field3Sum", "field4Sum").script(
bucketScript("seriesArithmetic").setBucketsPaths("field2Sum", "field3Sum", "field4Sum").script(
new Script("_value0 + _value1 + _value2", ScriptType.INLINE, null, null)))).execute().actionGet();
assertSearchResponse(response);
@ -153,7 +153,7 @@ public class BucketScriptTests extends ElasticsearchIntegrationTest {
.subAggregation(sum("field3Sum").field(FIELD_3_NAME))
.subAggregation(sum("field4Sum").field(FIELD_4_NAME))
.subAggregation(
seriesArithmetic("seriesArithmetic").setBucketsPaths("field2Sum", "field3Sum", "field4Sum").script(
bucketScript("seriesArithmetic").setBucketsPaths("field2Sum", "field3Sum", "field4Sum").script(
new Script("_value0 + _value1 / _value2", ScriptType.INLINE, null, null)))).execute().actionGet();
assertSearchResponse(response);
@ -199,7 +199,7 @@ public class BucketScriptTests extends ElasticsearchIntegrationTest {
.interval(interval)
.subAggregation(sum("field2Sum").field(FIELD_2_NAME))
.subAggregation(
seriesArithmetic("seriesArithmetic").setBucketsPaths("field2Sum").script(
bucketScript("seriesArithmetic").setBucketsPaths("field2Sum").script(
new Script("_value0", ScriptType.INLINE, null, null)))).execute().actionGet();
assertSearchResponse(response);
@ -245,7 +245,7 @@ public class BucketScriptTests extends ElasticsearchIntegrationTest {
.subAggregation(sum("field3Sum").field(FIELD_3_NAME))
.subAggregation(sum("field4Sum").field(FIELD_4_NAME))
.subAggregation(
seriesArithmetic("seriesArithmetic").setBucketsPathsMap(bucketsPathsMap ).script(
bucketScript("seriesArithmetic").setBucketsPathsMap(bucketsPathsMap ).script(
new Script("foo + bar + baz", ScriptType.INLINE, null, null)))).execute().actionGet();
assertSearchResponse(response);
@ -295,7 +295,7 @@ public class BucketScriptTests extends ElasticsearchIntegrationTest {
.subAggregation(sum("field3Sum").field(FIELD_3_NAME))
.subAggregation(sum("field4Sum").field(FIELD_4_NAME))
.subAggregation(
seriesArithmetic("seriesArithmetic").setBucketsPaths("field2Sum", "field3Sum", "field4Sum").script(
bucketScript("seriesArithmetic").setBucketsPaths("field2Sum", "field3Sum", "field4Sum").script(
new Script("(_value0 + _value1 + _value2) * factor", ScriptType.INLINE, null, params)))).execute().actionGet();
assertSearchResponse(response);
@ -343,7 +343,7 @@ public class BucketScriptTests extends ElasticsearchIntegrationTest {
.subAggregation(sum("field3Sum").field(FIELD_3_NAME))
.subAggregation(sum("field4Sum").field(FIELD_4_NAME))
.subAggregation(
seriesArithmetic("seriesArithmetic").setBucketsPaths("field2Sum", "field3Sum", "field4Sum").script(
bucketScript("seriesArithmetic").setBucketsPaths("field2Sum", "field3Sum", "field4Sum").script(
new Script("_value0 + _value1 + _value2", ScriptType.INLINE, null, null)).gapPolicy(GapPolicy.INSERT_ZEROS))).execute().actionGet();
assertSearchResponse(response);
@ -391,7 +391,7 @@ public class BucketScriptTests extends ElasticsearchIntegrationTest {
.subAggregation(sum("field3Sum").field(FIELD_3_NAME))
.subAggregation(sum("field4Sum").field(FIELD_4_NAME))
.subAggregation(
seriesArithmetic("seriesArithmetic").setBucketsPaths("field2Sum", "field3Sum", "field4Sum").script(
bucketScript("seriesArithmetic").setBucketsPaths("field2Sum", "field3Sum", "field4Sum").script(
new Script("my_script", ScriptType.INDEXED, null, null)).gapPolicy(GapPolicy.INSERT_ZEROS))).execute().actionGet();
assertSearchResponse(response);
@ -438,7 +438,7 @@ public class BucketScriptTests extends ElasticsearchIntegrationTest {
.subAggregation(sum("field3Sum").field(FIELD_3_NAME))
.subAggregation(sum("field4Sum").field(FIELD_4_NAME))
.subAggregation(
seriesArithmetic("seriesArithmetic").setBucketsPaths("field2Sum", "field3Sum", "field4Sum").script(
bucketScript("seriesArithmetic").setBucketsPaths("field2Sum", "field3Sum", "field4Sum").script(
new Script("_value0 + _value1 + _value2", ScriptType.INLINE, null, null))))
.execute().actionGet();
@ -462,7 +462,7 @@ public class BucketScriptTests extends ElasticsearchIntegrationTest {
.subAggregation(sum("field3Sum").field(FIELD_3_NAME))
.subAggregation(sum("field4Sum").field(FIELD_4_NAME))
.subAggregation(
seriesArithmetic("seriesArithmetic").setBucketsPaths("field2Sum", "field3Sum", "field4Sum").script(
bucketScript("seriesArithmetic").setBucketsPaths("field2Sum", "field3Sum", "field4Sum").script(
new Script("_value0 + _value1 + _value2", ScriptType.INLINE, null, null)))).execute().actionGet();
assertSearchResponse(response);

View File

@ -31,7 +31,7 @@ import org.elasticsearch.common.lucene.search.function.CombineFunction;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.cache.query.QueryCacheModule;
import org.elasticsearch.index.cache.query.QueryCacheModule.FilterCacheSettings;
import org.elasticsearch.index.cache.query.QueryCacheModule.QueryCacheSettings;
import org.elasticsearch.index.cache.query.index.IndexQueryCache;
import org.elasticsearch.index.mapper.MergeMappingException;
import org.elasticsearch.index.query.HasChildQueryBuilder;
@ -75,8 +75,8 @@ public class ChildQuerySearchTests extends ElasticsearchIntegrationTest {
protected Settings nodeSettings(int nodeOrdinal) {
return Settings.settingsBuilder().put(super.nodeSettings(nodeOrdinal))
// aggressive filter caching so that we can assert on the filter cache size
.put(QueryCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, IndexQueryCache.class)
.put(FilterCacheSettings.FILTER_CACHE_EVERYTHING, true)
.put(QueryCacheModule.QueryCacheSettings.QUERY_CACHE_TYPE, IndexQueryCache.class)
.put(QueryCacheSettings.QUERY_CACHE_EVERYTHING, true)
.build();
}

View File

@ -22,7 +22,7 @@ package org.elasticsearch.search.scriptfilter;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.cache.query.QueryCacheModule;
import org.elasticsearch.index.cache.query.QueryCacheModule.FilterCacheSettings;
import org.elasticsearch.index.cache.query.QueryCacheModule.QueryCacheSettings;
import org.elasticsearch.index.cache.query.index.IndexQueryCache;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService.ScriptType;
@ -51,8 +51,8 @@ public class ScriptQuerySearchTests extends ElasticsearchIntegrationTest {
protected Settings nodeSettings(int nodeOrdinal) {
return Settings.settingsBuilder().put(super.nodeSettings(nodeOrdinal))
// aggressive filter caching so that we can assert on the number of iterations of the script filters
.put(QueryCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, IndexQueryCache.class)
.put(FilterCacheSettings.FILTER_CACHE_EVERYTHING, true)
.put(QueryCacheModule.QueryCacheSettings.QUERY_CACHE_TYPE, IndexQueryCache.class)
.put(QueryCacheSettings.QUERY_CACHE_EVERYTHING, true)
.build();
}

View File

@ -1650,7 +1650,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
throw new IllegalArgumentException("port is not valid, expected number but was [" + split[1] + "]");
}
}
return new ExternalTestCluster(transportAddresses);
return new ExternalTestCluster(createTempDir(), transportAddresses);
}
protected TestCluster buildTestCluster(Scope scope, long seed) throws IOException {

View File

@ -37,6 +37,7 @@ import org.elasticsearch.node.internal.InternalSettingsPreparer;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.file.Path;
import java.util.Iterator;
import java.util.concurrent.atomic.AtomicInteger;
@ -64,12 +65,13 @@ public final class ExternalTestCluster extends TestCluster {
private final int numDataNodes;
private final int numMasterAndDataNodes;
public ExternalTestCluster(TransportAddress... transportAddresses) {
public ExternalTestCluster(Path tempDir, TransportAddress... transportAddresses) {
super(0);
Settings clientSettings = Settings.settingsBuilder()
.put("name", InternalTestCluster.TRANSPORT_CLIENT_PREFIX + EXTERNAL_CLUSTER_PREFIX + counter.getAndIncrement())
.put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, true) // prevents any settings to be replaced by system properties.
.put("client.transport.ignore_cluster_name", true)
.put("path.home", tempDir)
.put("node.mode", "network").build(); // we require network here!
this.client = TransportClient.builder().settings(clientSettings).build().addTransportAddresses(transportAddresses);
@ -142,7 +144,7 @@ public final class ExternalTestCluster extends TestCluster {
// turn increments the breaker, making it non-0
assertThat("Fielddata size must be 0 on node: " + stats.getNode(), stats.getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0l));
assertThat("Filter cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
assertThat("Query cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getQueryCache().getMemorySizeInBytes(), equalTo(0l));
assertThat("FixedBitSet cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getSegments().getBitsetMemoryInBytes(), equalTo(0l));
}
}

View File

@ -73,7 +73,7 @@ import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.cache.query.QueryCacheModule;
import org.elasticsearch.index.cache.query.QueryCacheModule.FilterCacheSettings;
import org.elasticsearch.index.cache.query.QueryCacheModule.QueryCacheSettings;
import org.elasticsearch.index.cache.query.index.IndexQueryCache;
import org.elasticsearch.index.cache.query.none.NoneQueryCache;
import org.elasticsearch.index.engine.CommitStats;
@ -438,11 +438,11 @@ public final class InternalTestCluster extends TestCluster {
}
if (random.nextBoolean()) {
builder.put(QueryCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, random.nextBoolean() ? IndexQueryCache.class : NoneQueryCache.class);
builder.put(QueryCacheModule.QueryCacheSettings.QUERY_CACHE_TYPE, random.nextBoolean() ? IndexQueryCache.class : NoneQueryCache.class);
}
if (random.nextBoolean()) {
builder.put(FilterCacheSettings.FILTER_CACHE_EVERYTHING, random.nextBoolean());
builder.put(QueryCacheSettings.QUERY_CACHE_EVERYTHING, random.nextBoolean());
}
if (random.nextBoolean()) {
@ -1807,7 +1807,7 @@ public final class InternalTestCluster extends TestCluster {
NodeService nodeService = getInstanceFromNode(NodeService.class, nodeAndClient.node);
NodeStats stats = nodeService.stats(CommonStatsFlags.ALL, false, false, false, false, false, false, false, false, false);
assertThat("Fielddata size must be 0 on node: " + stats.getNode(), stats.getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0l));
assertThat("Filter cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
assertThat("Query cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getQueryCache().getMemorySizeInBytes(), equalTo(0l));
assertThat("FixedBitSet cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getSegments().getBitsetMemoryInBytes(), equalTo(0l));
}
}

View File

@ -19,7 +19,7 @@
package org.elasticsearch.test;
import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.Lists;
import com.google.common.collect.Lists;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

View File

@ -19,7 +19,6 @@
package org.elasticsearch.transport;
import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.ImmutableList;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.elasticsearch.action.*;
@ -380,7 +379,9 @@ public class ContextAndHeaderTransportTests extends ElasticsearchIntegrationTest
@Override
public Collection<Class<? extends Module>> modules() {
return ImmutableList.of(ActionLoggingModule.class);
Collection<Class<? extends Module>> classes = new ArrayList<>();
classes.add(ActionLoggingModule.class);
return classes;
}
}

View File

@ -44,7 +44,9 @@ nodes.
Now, if we start two more nodes with `node.rack_id` set to `rack_two`,
Elasticsearch will move shards across to the new nodes, ensuring (if possible)
that the primary and replica shards are never in the same rack.
that no two copies of the same shard will be in the same rack. However, if `rack_two`
were to fail, taking down both of its nodes, Elasticsearch will still allocate the lost
shard copies to nodes in `rack_one`.
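For reference, a hedged sketch of the node settings this scenario assumes (the awareness attribute key is the commonly documented one; illustrative only):

import org.elasticsearch.common.settings.Settings;

// Illustrative: a rack_two node participating in rack-aware allocation.
Settings settings = Settings.settingsBuilder()
        .put("node.rack_id", "rack_two")
        .put("cluster.routing.allocation.awareness.attributes", "rack_id")
        .build();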
.Prefer local shards
*********************************************

View File

@ -52,8 +52,8 @@ fields referenced inside the query must use the complete path (fully
qualified).
The `score_mode` parameter sets how matching inner children affect the
scoring of the parent. It defaults to `avg`, but can be `sum`, `max` and
`none`.
scoring of the parent. It defaults to `avg`, but can be `sum`, `min`,
`max` and `none`.
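For illustration, a hedged sketch of setting this from the Java API (assuming NestedQueryBuilder's scoreMode(String) setter of this era; the path and field names are made up):

import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

// Illustrative: choose how matching inner children contribute to the parent's score.
QueryBuilder q = QueryBuilders.nestedQuery("obj1",
        QueryBuilders.termQuery("obj1.name", "blue"))
        .scoreMode("max"); // avg (default), sum, min, max or none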
Multi level nesting is automatically supported, and detected, resulting
in an inner nested query to automatically match the relevant nesting

View File

@ -1 +0,0 @@
49c100caf72d658aca8e58bd74a4ba90fa2b0d70

View File

@ -44,6 +44,12 @@ governing permissions and limitations under the License. -->
<groupId>com.microsoft.azure</groupId>
<artifactId>azure-management-compute</artifactId>
<version>0.7.0</version>
<exclusions>
<exclusion>
<groupId>stax</groupId>
<artifactId>stax-api</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>com.microsoft.azure</groupId>

View File

@ -1,202 +0,0 @@
[Standard Apache License, Version 2.0 boilerplate; full text of the deleted 202-line LICENSE file omitted.]

View File

@ -1 +0,0 @@
381ae6c8add7c98e9bcddb1e0b898400cd6fc3f7

View File

@ -43,6 +43,12 @@ governing permissions and limitations under the License. -->
<groupId>com.google.apis</groupId>
<artifactId>google-api-services-compute</artifactId>
<version>${google.gce.version}</version>
<exclusions>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava-jdk5</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>

View File

@ -29,11 +29,6 @@
<artifactId>hamcrest-all</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-test-framework</artifactId>

17
pom.xml
View File

@ -153,10 +153,23 @@
<version>1.0</version>
</dependency>
<dependency>
<groupId>com.twitter</groupId>
<artifactId>jsr166e</artifactId>
<version>1.1.0</version>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-test-framework</artifactId>
<version>${lucene.maven.version}</version>
<!-- we specify our own junit4 version -->
<exclusions>
<exclusion>
<groupId>com.carrotsearch.randomizedtesting</groupId>
<artifactId>junit4-ant</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
@ -737,9 +750,6 @@
<internalRuntimeForbidden>true</internalRuntimeForbidden>
<!-- if the used Java version is too new, don't fail, just do nothing: -->
<failOnUnsupportedJava>false</failOnUnsupportedJava>
<excludes>
<exclude>jsr166e/**</exclude>
</excludes>
<bundledSignatures>
<!-- This will automatically choose the right signatures based on 'targetVersion': -->
<bundledSignature>jdk-unsafe</bundledSignature>
@ -1457,6 +1467,7 @@ org.eclipse.jdt.ui.text.custom_code_templates=<?xml version\="1.0" encoding\="UT
</profile>
</profiles>
<modules>
<module>securemock</module>
<module>dev-tools</module>
<module>rest-api-spec</module>
<module>plugins</module>

View File

@ -16,10 +16,12 @@
- do:
indices.recovery:
index: [test_1]
human: true
- match: { test_1.shards.0.type: "STORE" }
- match: { test_1.shards.0.type: "STORE" }
- match: { test_1.shards.0.stage: "DONE" }
- match: { test_1.shards.0.primary: true }
- match: { test_1.shards.0.start_time: /^2\d\d\d-.+/ }
- match: { test_1.shards.0.target.ip: /^\d+\.\d+\.\d+\.\d+$/ }
- gte: { test_1.shards.0.index.files.total: 0 }
- gte: { test_1.shards.0.index.files.reused: 0 }

107
securemock/pom.xml Normal file
View File

@ -0,0 +1,107 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.elasticsearch</groupId>
<artifactId>securemock</artifactId>
<version>1.0-SNAPSHOT</version>
<packaging>jar</packaging>
<name>securemock</name>
<description>Allows creating mocks in tests without having to grant dangerous permissions to all of your code.</description>
<inceptionYear>2015</inceptionYear>
<parent>
<groupId>org.sonatype.oss</groupId>
<artifactId>oss-parent</artifactId>
<version>7</version>
</parent>
<licenses>
<license>
<name>The Apache Software License, Version 2.0</name>
<url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<connection>scm:git:git@github.com:elastic/elasticsearch.git</connection>
<developerConnection>scm:git:git@github.com:elastic/elasticsearch.git</developerConnection>
<url>http://github.com/elastic/elasticsearch/securemock</url>
</scm>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<maven.compiler.source>1.7</maven.compiler.source>
<maven.compiler.target>1.7</maven.compiler.target>
</properties>
<repositories>
<repository>
<id>oss-snapshots</id>
<name>Sonatype OSS Snapshots</name>
<url>https://oss.sonatype.org/content/repositories/snapshots/</url>
</repository>
</repositories>
<dependencies>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
<version>1.9.5</version>
</dependency>
<dependency> <!-- required for Mockito -->
<groupId>org.objenesis</groupId>
<artifactId>objenesis</artifactId>
<version>2.1</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.11</version>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<version>2.4</version>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>shade</goal>
</goals>
<configuration>
<artifactSet>
<excludes>
<exclude>junit:junit</exclude>
<exclude>org.hamcrest:hamcrest-core</exclude>
</excludes>
</artifactSet>
<!-- switch the Mockito classes to give a binary compatible api -->
<relocations>
<relocation>
<pattern>org.mockito</pattern>
<shadedPattern>org.elasticsearch.mock.orig</shadedPattern>
<includes>
<include>org.mockito.Mockito</include>
</includes>
</relocation>
<relocation>
<pattern>org.elasticsearch.mock</pattern>
<shadedPattern>org.mockito</shadedPattern>
<includes>
<include>org.elasticsearch.mock.Mockito*</include>
</includes>
</relocation>
</relocations>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@ -0,0 +1,367 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.mock;
import org.mockito.InOrder;
import org.mockito.Matchers;
import org.mockito.MockSettings;
import org.mockito.MockingDetails;
import org.mockito.ReturnValues;
import org.mockito.stubbing.Answer;
import org.mockito.stubbing.DeprecatedOngoingStubbing;
import org.mockito.stubbing.OngoingStubbing;
import org.mockito.stubbing.Stubber;
import org.mockito.stubbing.VoidMethodStubbable;
import org.mockito.verification.VerificationMode;
import org.mockito.verification.VerificationWithTimeout;
import java.security.AccessController;
import java.security.PrivilegedAction;
/**
* Wraps Mockito API with calls to AccessController.
* <p>
* This is useful if you want to mock in a SecurityManager environment,
* but confine the permissions to only the mocking test libraries.
* <p>
* Instead of:
* <pre>
* grant {
* permission java.lang.RuntimePermission "reflectionFactoryAccess";
* };
* </pre>
* You can just change your Maven dependencies to use securemock.jar, and then:
* <pre>
* grant codeBase "/url/to/securemock.jar" {
* permission java.lang.RuntimePermission "reflectionFactoryAccess";
* };
* </pre>
*/
public class Mockito extends Matchers {
public static final Answer<Object> RETURNS_DEFAULTS = org.mockito.Mockito.RETURNS_DEFAULTS;
public static final Answer<Object> RETURNS_SMART_NULLS = org.mockito.Mockito.RETURNS_SMART_NULLS;
public static final Answer<Object> RETURNS_MOCKS = org.mockito.Mockito.RETURNS_MOCKS;
public static final Answer<Object> RETURNS_DEEP_STUBS = org.mockito.Mockito.RETURNS_DEEP_STUBS;
public static final Answer<Object> CALLS_REAL_METHODS = org.mockito.Mockito.CALLS_REAL_METHODS;
public static <T> T mock(final Class<T> classToMock) {
return AccessController.doPrivileged(new PrivilegedAction<T>() {
@Override
public T run() {
return org.mockito.Mockito.mock(classToMock);
}
});
}
public static <T> T mock(final Class<T> classToMock, final String name) {
return AccessController.doPrivileged(new PrivilegedAction<T>() {
@Override
public T run() {
return org.mockito.Mockito.mock(classToMock, name);
}
});
}
public static MockingDetails mockingDetails(final Object toInspect) {
return AccessController.doPrivileged(new PrivilegedAction<MockingDetails>() {
@Override
public MockingDetails run() {
return org.mockito.Mockito.mockingDetails(toInspect);
}
});
}
@Deprecated
public static <T> T mock(final Class<T> classToMock, final ReturnValues returnValues) {
return AccessController.doPrivileged(new PrivilegedAction<T>() {
@Override
public T run() {
return org.mockito.Mockito.mock(classToMock, returnValues);
}
});
}
public static <T> T mock(final Class<T> classToMock, final Answer defaultAnswer) {
return AccessController.doPrivileged(new PrivilegedAction<T>() {
@Override
public T run() {
return org.mockito.Mockito.mock(classToMock, defaultAnswer);
}
});
}
public static <T> T mock(final Class<T> classToMock, final MockSettings mockSettings) {
return AccessController.doPrivileged(new PrivilegedAction<T>() {
@Override
public T run() {
return org.mockito.Mockito.mock(classToMock, mockSettings);
}
});
}
public static <T> T spy(final T object) {
return AccessController.doPrivileged(new PrivilegedAction<T>() {
@Override
public T run() {
return org.mockito.Mockito.spy(object);
}
});
}
public static <T> DeprecatedOngoingStubbing<T> stub(final T methodCall) {
return AccessController.doPrivileged(new PrivilegedAction<DeprecatedOngoingStubbing<T>>() {
@Override
public DeprecatedOngoingStubbing<T> run() {
return org.mockito.Mockito.stub(methodCall);
}
});
}
public static <T> OngoingStubbing<T> when(final T methodCall) {
return AccessController.doPrivileged(new PrivilegedAction<OngoingStubbing<T>>() {
@Override
public OngoingStubbing<T> run() {
return org.mockito.Mockito.when(methodCall);
}
});
}
public static <T> T verify(final T mock) {
return AccessController.doPrivileged(new PrivilegedAction<T>() {
@Override
public T run() {
return org.mockito.Mockito.verify(mock);
}
});
}
public static <T> T verify(final T mock, final VerificationMode mode) {
return AccessController.doPrivileged(new PrivilegedAction<T>() {
@Override
public T run() {
return org.mockito.Mockito.verify(mock, mode);
}
});
}
public static <T> void reset(final T... mocks) {
AccessController.doPrivileged(new PrivilegedAction<Void>() {
@Override
public Void run() {
org.mockito.Mockito.reset(mocks);
return null;
}
});
}
public static void verifyNoMoreInteractions(final Object... mocks) {
AccessController.doPrivileged(new PrivilegedAction<Void>() {
@Override
public Void run() {
org.mockito.Mockito.verifyNoMoreInteractions(mocks);
return null;
}
});
}
public static void verifyZeroInteractions(final Object... mocks) {
AccessController.doPrivileged(new PrivilegedAction<Void>() {
@Override
public Void run() {
org.mockito.Mockito.verifyZeroInteractions(mocks);
return null;
}
});
}
@Deprecated
public static <T> VoidMethodStubbable<T> stubVoid(final T mock) {
return AccessController.doPrivileged(new PrivilegedAction<VoidMethodStubbable<T>>() {
@Override
public VoidMethodStubbable<T> run() {
return org.mockito.Mockito.stubVoid(mock);
}
});
}
public static Stubber doThrow(final Throwable toBeThrown) {
return AccessController.doPrivileged(new PrivilegedAction<Stubber>() {
@Override
public Stubber run() {
return org.mockito.Mockito.doThrow(toBeThrown);
}
});
}
public static Stubber doThrow(final Class<? extends Throwable> toBeThrown) {
return AccessController.doPrivileged(new PrivilegedAction<Stubber>() {
@Override
public Stubber run() {
return org.mockito.Mockito.doThrow(toBeThrown);
}
});
}
public static Stubber doCallRealMethod() {
return AccessController.doPrivileged(new PrivilegedAction<Stubber>() {
@Override
public Stubber run() {
return org.mockito.Mockito.doCallRealMethod();
}
});
}
public static Stubber doAnswer(final Answer answer) {
return AccessController.doPrivileged(new PrivilegedAction<Stubber>() {
@Override
public Stubber run() {
return org.mockito.Mockito.doAnswer(answer);
}
});
}
public static Stubber doNothing() {
return AccessController.doPrivileged(new PrivilegedAction<Stubber>() {
@Override
public Stubber run() {
return org.mockito.Mockito.doNothing();
}
});
}
public static Stubber doReturn(final Object toBeReturned) {
return AccessController.doPrivileged(new PrivilegedAction<Stubber>() {
@Override
public Stubber run() {
return org.mockito.Mockito.doReturn(toBeReturned);
}
});
}
public static InOrder inOrder(final Object... mocks) {
return AccessController.doPrivileged(new PrivilegedAction<InOrder>() {
@Override
public InOrder run() {
return org.mockito.Mockito.inOrder(mocks);
}
});
}
public static Object[] ignoreStubs(final Object... mocks) {
return AccessController.doPrivileged(new PrivilegedAction<Object[]>() {
@Override
public Object[] run() {
return org.mockito.Mockito.ignoreStubs(mocks);
}
});
}
public static VerificationMode times(final int wantedNumberOfInvocations) {
return AccessController.doPrivileged(new PrivilegedAction<VerificationMode>() {
@Override
public VerificationMode run() {
return org.mockito.Mockito.times(wantedNumberOfInvocations);
}
});
}
public static VerificationMode never() {
return AccessController.doPrivileged(new PrivilegedAction<VerificationMode>() {
@Override
public VerificationMode run() {
return org.mockito.Mockito.never();
}
});
}
public static VerificationMode atLeastOnce() {
return AccessController.doPrivileged(new PrivilegedAction<VerificationMode>() {
@Override
public VerificationMode run() {
return org.mockito.Mockito.atLeastOnce();
}
});
}
public static VerificationMode atLeast(final int minNumberOfInvocations) {
return AccessController.doPrivileged(new PrivilegedAction<VerificationMode>() {
@Override
public VerificationMode run() {
return org.mockito.Mockito.atLeast(minNumberOfInvocations);
}
});
}
public static VerificationMode atMost(final int maxNumberOfInvocations) {
return AccessController.doPrivileged(new PrivilegedAction<VerificationMode>() {
@Override
public VerificationMode run() {
return org.mockito.Mockito.atMost(maxNumberOfInvocations);
}
});
}
public static VerificationMode calls(final int wantedNumberOfInvocations) {
return AccessController.doPrivileged(new PrivilegedAction<VerificationMode>() {
@Override
public VerificationMode run() {
return org.mockito.Mockito.calls(wantedNumberOfInvocations);
}
});
}
public static VerificationMode only() {
return AccessController.doPrivileged(new PrivilegedAction<VerificationMode>() {
@Override
public VerificationMode run() {
return org.mockito.Mockito.only();
}
});
}
public static VerificationWithTimeout timeout(final int millis) {
return AccessController.doPrivileged(new PrivilegedAction<VerificationWithTimeout>() {
@Override
public VerificationWithTimeout run() {
return org.mockito.Mockito.timeout(millis);
}
});
}
public static void validateMockitoUsage() {
AccessController.doPrivileged(new PrivilegedAction<Void>() {
@Override
public Void run() {
org.mockito.Mockito.validateMockitoUsage();
return null;
}
});
}
public static MockSettings withSettings() {
return AccessController.doPrivileged(new PrivilegedAction<MockSettings>() {
@Override
public MockSettings run() {
return org.mockito.Mockito.withSettings();
}
});
}
}
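Taken together with the shade-plugin relocations in the pom above, test code keeps importing `org.mockito.Mockito` and transparently receives this privileged wrapper. A minimal usage sketch, assuming the shaded securemock jar is on the test classpath; `MyService` is a hypothetical interface invented for illustration:

import org.mockito.Mockito; // resolves to this wrapper once the shaded jar is in use

public class SecuremockUsageExample {
    interface MyService { // hypothetical collaborator, illustration only
        String ping();
    }

    public static void main(String[] args) {
        MyService svc = Mockito.mock(MyService.class); // mock creation runs inside doPrivileged
        Mockito.when(svc.ping()).thenReturn("pong");
        System.out.println(svc.ping()); // prints: pong
        Mockito.verify(svc).ping();
    }
}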