Merge branch 'master' into feature/query-refactoring
commit 2784e2f7f0
@@ -19,14 +19,13 @@

package org.elasticsearch;

import org.elasticsearch.common.io.FastStringReader;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.joda.time.DateTimeZone;
import org.joda.time.format.ISODateTimeFormat;

import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

/**

@@ -40,10 +39,9 @@ public class Build {
String hashShort = "NA";
String timestamp = "NA";

try {
String properties = Streams.copyToStringFromClasspath("/es-build.properties");
try (InputStream is = Build.class.getResourceAsStream("/es-build.properties")){
Properties props = new Properties();
props.load(new FastStringReader(properties));
props.load(is);
hash = props.getProperty("hash", hash);
if (!hash.equals("NA")) {
hashShort = hash.substring(0, 7);

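Note: the hunk above replaces the copy-to-String/FastStringReader detour with a try-with-resources stream. A minimal standalone sketch of the new loading pattern (the resource name is the one from the diff; the class name here is illustrative):

    import java.io.IOException;
    import java.io.InputStream;
    import java.util.Properties;

    public class BuildInfoSketch {
        public static Properties loadBuildProperties() throws IOException {
            Properties props = new Properties();
            // getResourceAsStream returns null if the resource is missing; Properties.load would
            // then throw a NullPointerException, so callers may want an explicit null check.
            try (InputStream is = BuildInfoSketch.class.getResourceAsStream("/es-build.properties")) {
                props.load(is);
            }
            return props;
        }
    }
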
@@ -19,6 +19,7 @@

package org.elasticsearch.action.admin.indices.upgrade.post;

import org.elasticsearch.Version;
import org.elasticsearch.action.support.broadcast.BroadcastShardResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

@@ -32,7 +33,9 @@ import java.text.ParseException;
*/
class ShardUpgradeResponse extends BroadcastShardResponse {

private org.apache.lucene.util.Version version;
private org.apache.lucene.util.Version oldestLuceneSegment;

private Version upgradeVersion;

private boolean primary;

@@ -40,14 +43,19 @@ class ShardUpgradeResponse extends BroadcastShardResponse {
ShardUpgradeResponse() {
}

ShardUpgradeResponse(ShardId shardId, boolean primary, org.apache.lucene.util.Version version) {
ShardUpgradeResponse(ShardId shardId, boolean primary, Version upgradeVersion, org.apache.lucene.util.Version oldestLuceneSegment) {
super(shardId);
this.primary = primary;
this.version = version;
this.upgradeVersion = upgradeVersion;
this.oldestLuceneSegment = oldestLuceneSegment;
}

public org.apache.lucene.util.Version version() {
return this.version;
public org.apache.lucene.util.Version oldestLuceneSegment() {
return this.oldestLuceneSegment;
}

public Version upgradeVersion() {
return this.upgradeVersion;
}

public boolean primary() {

@@ -59,18 +67,21 @@ class ShardUpgradeResponse extends BroadcastShardResponse {
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
primary = in.readBoolean();
upgradeVersion = Version.readVersion(in);
try {
version = org.apache.lucene.util.Version.parse(in.readString());
oldestLuceneSegment = org.apache.lucene.util.Version.parse(in.readString());
} catch (ParseException ex) {
throw new IOException("failed to parse lucene version [" + version + "]", ex);
throw new IOException("failed to parse lucene version [" + oldestLuceneSegment + "]", ex);
}

}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(primary);
out.writeString(version.toString());
Version.writeVersion(upgradeVersion, out);
out.writeString(oldestLuceneSegment.toString());
}

}

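Note: the readFrom/writeTo pair above relies on the usual wire-format rule that fields are read back in exactly the order they were written. A hedged, generic illustration using plain java.io rather than the Elasticsearch StreamInput/StreamOutput classes:

    import java.io.*;

    class PairWireFormat {
        static void write(DataOutputStream out, boolean primary, String esVersion, String luceneVersion) throws IOException {
            out.writeBoolean(primary);    // 1st: primary flag
            out.writeUTF(esVersion);      // 2nd: version that performed the upgrade
            out.writeUTF(luceneVersion);  // 3rd: oldest Lucene segment version
        }

        static String[] read(DataInputStream in) throws IOException {
            boolean primary = in.readBoolean();   // must mirror the write order exactly
            String esVersion = in.readUTF();
            String luceneVersion = in.readUTF();
            return new String[] { Boolean.toString(primary), esVersion, luceneVersion };
        }
    }
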
@@ -19,7 +19,7 @@

package org.elasticsearch.action.admin.indices.upgrade.post;

import org.apache.lucene.util.Version;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.PrimaryMissingActionException;
import org.elasticsearch.action.ShardOperationFailedException;

@@ -34,6 +34,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.*;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.IndexShard;

@@ -75,7 +76,7 @@ public class TransportUpgradeAction extends TransportBroadcastAction<UpgradeRequ
int failedShards = 0;
List<ShardOperationFailedException> shardFailures = null;
Map<String, Integer> successfulPrimaryShards = newHashMap();
Map<String, Version> versions = newHashMap();
Map<String, Tuple<Version, org.apache.lucene.util.Version>> versions = newHashMap();
for (int i = 0; i < shardsResponses.length(); i++) {
Object shardResponse = shardsResponses.get(i);
if (shardResponse == null) {

@@ -94,20 +95,35 @@ public class TransportUpgradeAction extends TransportBroadcastAction<UpgradeRequ
Integer count = successfulPrimaryShards.get(index);
successfulPrimaryShards.put(index, count == null ? 1 : count + 1);
}
Version version = versions.get(index);
if (version == null || shardUpgradeResponse.version().onOrAfter(version) == false) {
versions.put(index, shardUpgradeResponse.version());
Tuple<Version, org.apache.lucene.util.Version> versionTuple = versions.get(index);
if (versionTuple == null) {
versions.put(index, new Tuple<>(shardUpgradeResponse.upgradeVersion(), shardUpgradeResponse.oldestLuceneSegment()));
} else {
// We already have versions for this index - let's see if we need to update them based on the current shard
Version version = versionTuple.v1();
org.apache.lucene.util.Version luceneVersion = versionTuple.v2();
// For the metadata we are interested in the _latest_ elasticsearch version that was processing the metadata
// Since we rewrite the mapping during upgrade the metadata is always rewritten by the latest version
if (shardUpgradeResponse.upgradeVersion().after(versionTuple.v1())) {
version = shardUpgradeResponse.upgradeVersion();
}
// For the lucene version we are interested in the _oldest_ lucene version since it determines the
// oldest version that we need to support
if (shardUpgradeResponse.oldestLuceneSegment().onOrAfter(versionTuple.v2()) == false) {
luceneVersion = shardUpgradeResponse.oldestLuceneSegment();
}
versions.put(index, new Tuple<>(version, luceneVersion));
}
}
}
Map<String, String> updatedVersions = newHashMap();
Map<String, Tuple<org.elasticsearch.Version, String>> updatedVersions = newHashMap();
MetaData metaData = clusterState.metaData();
for (Map.Entry<String, Version> versionEntry : versions.entrySet()) {
for (Map.Entry<String, Tuple<Version, org.apache.lucene.util.Version>> versionEntry : versions.entrySet()) {
String index = versionEntry.getKey();
Integer primaryCount = successfulPrimaryShards.get(index);
int expectedPrimaryCount = metaData.index(index).getNumberOfShards();
if (primaryCount == metaData.index(index).getNumberOfShards()) {
updatedVersions.put(index, versionEntry.getValue().toString());
updatedVersions.put(index, new Tuple<>(versionEntry.getValue().v1(), versionEntry.getValue().v2().toString()));
} else {
logger.warn("Not updating settings for the index [{}] because upgraded of some primary shards failed - expected[{}], received[{}]", index,
expectedPrimaryCount, primaryCount == null ? 0 : primaryCount);

@@ -130,8 +146,9 @@ public class TransportUpgradeAction extends TransportBroadcastAction<UpgradeRequ
@Override
protected ShardUpgradeResponse shardOperation(ShardUpgradeRequest request) {
IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).shardSafe(request.shardId().id());
org.apache.lucene.util.Version version = indexShard.upgrade(request.upgradeRequest());
return new ShardUpgradeResponse(request.shardId(), indexShard.routingEntry().primary(), version);
org.apache.lucene.util.Version oldestLuceneSegment = indexShard.upgrade(request.upgradeRequest());
// We are using the current version of elasticsearch as upgrade version since we update mapping to match the current version
return new ShardUpgradeResponse(request.shardId(), indexShard.routingEntry().primary(), Version.CURRENT, oldestLuceneSegment);
}

/**

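Note: the comments above spell out the per-index merge rule: keep the latest Elasticsearch version that rewrote the metadata and the oldest Lucene segment version still present. A hedged standalone sketch of that rule (the map layout and method name are illustrative, not the actual ES classes):

    import java.util.HashMap;
    import java.util.Map;

    class IndexVersionMerge {
        static void merge(Map<String, long[]> perIndex, String index, long esVersionId, long luceneVersionOrdinal) {
            long[] current = perIndex.get(index);
            if (current == null) {
                perIndex.put(index, new long[] { esVersionId, luceneVersionOrdinal });
                return;
            }
            // metadata is rewritten by the newest node that touched it -> keep the maximum
            current[0] = Math.max(current[0], esVersionId);
            // the oldest segment bounds the formats that must stay supported -> keep the minimum
            current[1] = Math.min(current[1], luceneVersionOrdinal);
        }
    }
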
@@ -19,8 +19,10 @@

package org.elasticsearch.action.admin.indices.upgrade.post;

import org.elasticsearch.Version;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

@@ -37,13 +39,13 @@ import static com.google.common.collect.Maps.newHashMap;
*/
public class UpgradeResponse extends BroadcastResponse {

private Map<String, String> versions;
private Map<String, Tuple<Version, String>> versions;

UpgradeResponse() {

}

UpgradeResponse(Map<String, String> versions, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
UpgradeResponse(Map<String, Tuple<Version, String>> versions, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
super(totalShards, successfulShards, failedShards, shardFailures);
this.versions = versions;
}

@@ -55,8 +57,9 @@ public class UpgradeResponse extends BroadcastResponse {
versions = newHashMap();
for (int i=0; i<size; i++) {
String index = in.readString();
String version = in.readString();
versions.put(index, version);
Version upgradeVersion = Version.readVersion(in);
String oldestLuceneSegment = in.readString();
versions.put(index, new Tuple<>(upgradeVersion, oldestLuceneSegment));
}
}

@@ -64,13 +67,18 @@ public class UpgradeResponse extends BroadcastResponse {
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(versions.size());
for(Map.Entry<String, String> entry : versions.entrySet()) {
for(Map.Entry<String, Tuple<Version, String>> entry : versions.entrySet()) {
out.writeString(entry.getKey());
out.writeString(entry.getValue());
Version.writeVersion(entry.getValue().v1(), out);
out.writeString(entry.getValue().v2());
}
}

public Map<String, String> versions() {
/**
* Returns the highest upgrade version of the node that performed metadata upgrade and the
* the version of the oldest lucene segment for each index that was upgraded.
*/
public Map<String, Tuple<Version, String>> versions() {
return versions;
}
}

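Note: a hedged usage sketch of the richer return type. It assumes a Client instance and the admin prepareUpgrade builder of this era; the index name is made up:

    UpgradeResponse response = client.admin().indices().prepareUpgrade("my-index").get();
    for (Map.Entry<String, Tuple<Version, String>> entry : response.versions().entrySet()) {
        Version upgradeVersion = entry.getValue().v1();      // ES version that rewrote the index metadata
        String oldestLuceneSegment = entry.getValue().v2();  // oldest Lucene segment version left in the index
        System.out.println(entry.getKey() + ": upgraded by " + upgradeVersion + ", oldest segment " + oldestLuceneSegment);
    }
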
@@ -19,7 +19,9 @@

package org.elasticsearch.action.admin.indices.upgrade.post;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.ack.ClusterStateUpdateRequest;
import org.elasticsearch.common.collect.Tuple;

import java.util.Map;

@@ -28,7 +30,7 @@ import java.util.Map;
*/
public class UpgradeSettingsClusterStateUpdateRequest extends ClusterStateUpdateRequest<UpgradeSettingsClusterStateUpdateRequest> {

private Map<String, String> versions;
private Map<String, Tuple<Version, String>> versions;

public UpgradeSettingsClusterStateUpdateRequest() {

@@ -37,14 +39,14 @@ public class UpgradeSettingsClusterStateUpdateRequest extends ClusterStateUpdate
/**
* Returns the index to version map for indices that should be updated
*/
public Map<String, String> versions() {
public Map<String, Tuple<Version, String>> versions() {
return versions;
}

/**
* Sets the index to version map for indices that should be updated
*/
public UpgradeSettingsClusterStateUpdateRequest versions(Map<String, String> versions) {
public UpgradeSettingsClusterStateUpdateRequest versions(Map<String, Tuple<Version, String>> versions) {
this.versions = versions;
return this;
}

@@ -19,8 +19,10 @@

package org.elasticsearch.action.admin.indices.upgrade.post;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

@@ -35,16 +37,17 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
*/
public class UpgradeSettingsRequest extends AcknowledgedRequest<UpgradeSettingsRequest> {

private Map<String, String> versions;
private Map<String, Tuple<Version, String>> versions;

UpgradeSettingsRequest() {
}

/**
* Constructs a new request to update minimum compatible version settings for one or more indices
*
* @param versions a map from index name to elasticsearch version, oldest lucene segment version tuple
*/
public UpgradeSettingsRequest(Map<String, String> versions) {
public UpgradeSettingsRequest(Map<String, Tuple<Version, String>> versions) {
this.versions = versions;
}

@@ -59,14 +62,14 @@ public class UpgradeSettingsRequest extends AcknowledgedRequest<UpgradeSettingsR
}

Map<String, String> versions() {
Map<String, Tuple<Version, String>> versions() {
return versions;
}

/**
* Sets the index versions to be updated
*/
public UpgradeSettingsRequest versions(Map<String, String> versions) {
public UpgradeSettingsRequest versions(Map<String, Tuple<Version, String>> versions) {
this.versions = versions;
return this;
}

@@ -79,8 +82,9 @@ public class UpgradeSettingsRequest extends AcknowledgedRequest<UpgradeSettingsR
versions = newHashMap();
for (int i=0; i<size; i++) {
String index = in.readString();
String version = in.readString();
versions.put(index, version);
Version upgradeVersion = Version.readVersion(in);
String oldestLuceneSegment = in.readString();
versions.put(index, new Tuple<>(upgradeVersion, oldestLuceneSegment));
}
readTimeout(in);
}

@@ -89,9 +93,10 @@ public class UpgradeSettingsRequest extends AcknowledgedRequest<UpgradeSettingsR
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(versions.size());
for(Map.Entry<String, String> entry : versions.entrySet()) {
for(Map.Entry<String, Tuple<Version, String>> entry : versions.entrySet()) {
out.writeString(entry.getKey());
out.writeString(entry.getValue());
Version.writeVersion(entry.getValue().v1(), out);
out.writeString(entry.getValue().v2());
}
writeTimeout(out);
}

@@ -19,8 +19,10 @@

package org.elasticsearch.action.admin.indices.upgrade.post;

import org.elasticsearch.Version;
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.collect.Tuple;

import java.util.Map;

@@ -36,7 +38,7 @@ public class UpgradeSettingsRequestBuilder extends AcknowledgedRequestBuilder<Up
/**
* Sets the index versions to be updated
*/
public UpgradeSettingsRequestBuilder setVersions(Map<String, String> versions) {
public UpgradeSettingsRequestBuilder setVersions(Map<String, Tuple<Version, String>> versions) {
request.versions(versions);
return this;
}

@@ -211,12 +211,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
}

if (indexShard.getTranslogDurability() == Translog.Durabilty.REQUEST && location != null) {
try {
indexShard.sync(location);
} catch (EngineClosedException e) {
// ignore, the engine is already closed and we do not want the
// operation to be retried, because it has been modified
}
indexShard.sync(location);
}
}
}

@@ -18,6 +18,8 @@
*/
package org.elasticsearch.cluster.metadata;

import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.lucene.analysis.Analyzer;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.routing.DjbHashFunction;
import org.elasticsearch.cluster.routing.HashFunction;

@@ -27,6 +29,12 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import com.google.common.collect.ImmutableSet;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.similarity.SimilarityLookupService;
import org.elasticsearch.script.ScriptService;

import java.util.Set;

@@ -45,11 +53,12 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {

private final Class<? extends HashFunction> pre20HashFunction;
private final Boolean pre20UseType;
private final ScriptService scriptService;

@Inject
public MetaDataIndexUpgradeService(Settings settings) {
public MetaDataIndexUpgradeService(Settings settings, ScriptService scriptService) {
super(settings);

this.scriptService = scriptService;
final String pre20HashFunctionName = settings.get(DEPRECATED_SETTING_ROUTING_HASH_FUNCTION, null);
final boolean hasCustomPre20HashFunction = pre20HashFunctionName != null;
// the hash function package has changed we replace the two hash functions if their fully qualified name is used.

@@ -83,12 +92,24 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
*/
public IndexMetaData upgradeIndexMetaData(IndexMetaData indexMetaData) {
// Throws an exception if there are too-old segments:
if (isUpgraded(indexMetaData)) {
return indexMetaData;
}
checkSupportedVersion(indexMetaData);
IndexMetaData newMetaData = upgradeLegacyRoutingSettings(indexMetaData);
newMetaData = addDefaultUnitsIfNeeded(newMetaData);
checkMappingsCompatibility(newMetaData);
newMetaData = markAsUpgraded(newMetaData);
return newMetaData;
}

/**
* Checks if the index was already opened by this version of Elasticsearch and doesn't require any additional checks.
*/
private boolean isUpgraded(IndexMetaData indexMetaData) {
return indexMetaData.upgradeVersion().onOrAfter(Version.V_2_0_0_beta1);
}

/**
* Elasticsearch 2.0 no longer supports indices with pre Lucene v4.0 (Elasticsearch v 0.90.0) segments. All indices
* that were created before Elasticsearch v0.90.0 should be upgraded using upgrade plugin before they can

@@ -239,4 +260,66 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
// No changes:
return indexMetaData;
}

/**
* Checks the mappings for compatibility with the current version
*/
private void checkMappingsCompatibility(IndexMetaData indexMetaData) {
Index index = new Index(indexMetaData.getIndex());
Settings settings = indexMetaData.settings();
try {
SimilarityLookupService similarityLookupService = new SimilarityLookupService(index, settings);
// We cannot instantiate real analysis server at this point because the node might not have
// been started yet. However, we don't really need real analyzers at this stage - so we can fake it
try (AnalysisService analysisService = new FakeAnalysisService(index, settings)) {
try (MapperService mapperService = new MapperService(index, settings, analysisService, similarityLookupService, scriptService)) {
for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
MappingMetaData mappingMetaData = cursor.value;
mapperService.merge(mappingMetaData.type(), mappingMetaData.source(), false, false);
}
}
}
} catch (Exception ex) {
// Wrap the inner exception so we have the index name in the exception message
throw new IllegalStateException("unable to upgrade the mappings for the index [" + indexMetaData.getIndex() + "], reason: [" + ex.getMessage() + "]", ex);
}
}

/**
* Marks index as upgraded so we don't have to test it again
*/
private IndexMetaData markAsUpgraded(IndexMetaData indexMetaData) {
Settings settings = Settings.builder().put(indexMetaData.settings()).put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.CURRENT).build();
return IndexMetaData.builder(indexMetaData).settings(settings).build();
}

/**
* A fake analysis server that returns the same keyword analyzer for all requests
*/
private static class FakeAnalysisService extends AnalysisService {

private Analyzer fakeAnalyzer = new Analyzer() {
@Override
protected TokenStreamComponents createComponents(String fieldName) {
throw new UnsupportedOperationException("shouldn't be here");
}
};

public FakeAnalysisService(Index index, Settings indexSettings) {
super(index, indexSettings);
}

@Override
public NamedAnalyzer analyzer(String name) {
return new NamedAnalyzer(name, fakeAnalyzer);
}

@Override
public void close() {
fakeAnalyzer.close();
super.close();
}
}

}

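Note: the fake analyzer above deliberately throws, because the dry-run mapping merge never analyzes text. If it ever did, a minimal keyword-only analyzer in plain Lucene would look roughly like this (a hedged sketch, not part of the commit):

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.core.KeywordTokenizer;

    final class KeywordOnlyAnalyzer extends Analyzer {
        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
            // emits the whole input as a single token, no real analysis chain required
            return new TokenStreamComponents(new KeywordTokenizer());
        }
    }
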
@@ -34,6 +34,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.settings.DynamicSettings;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;

@@ -334,7 +335,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
@Override
public ClusterState execute(ClusterState currentState) {
MetaData.Builder metaDataBuilder = MetaData.builder(currentState.metaData());
for (Map.Entry<String, String> entry : request.versions().entrySet()) {
for (Map.Entry<String, Tuple<Version, String>> entry : request.versions().entrySet()) {
String index = entry.getKey();
IndexMetaData indexMetaData = metaDataBuilder.get(index);
if (indexMetaData != null) {

@@ -342,8 +343,8 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
// No reason to pollute the settings, we didn't really upgrade anything
metaDataBuilder.put(IndexMetaData.builder(indexMetaData)
.settings(settingsBuilder().put(indexMetaData.settings())
.put(IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE, entry.getValue())
.put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.CURRENT)
.put(IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE, entry.getValue().v2())
.put(IndexMetaData.SETTING_VERSION_UPGRADED, entry.getValue().v1())
)
);
}

@@ -24,6 +24,7 @@ import com.google.common.collect.ImmutableMap;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Collection;
import java.util.Map;

/**

@@ -48,10 +49,17 @@ public interface BlobContainer {
/**
* Deletes a blob with giving name.
*
* If blob exist but cannot be deleted an exception has to be thrown.
* If a blob exists but cannot be deleted an exception has to be thrown.
*/
void deleteBlob(String blobName) throws IOException;

/**
* Deletes blobs with giving names.
*
* If a blob exists but cannot be deleted an exception has to be thrown.
*/
void deleteBlobs(Collection<String> blobNames) throws IOException;

/**
* Deletes all blobs in the container that match the specified prefix.
*/

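Note: a hedged usage sketch of the new bulk-delete method. Only deleteBlobs(Collection<String>) comes from the interface above; the prefix, the blobs map, and the surrounding helper are illustrative:

    static void deleteWithPrefix(BlobContainer container, Map<String, BlobMetaData> blobs, String prefix) throws IOException {
        List<String> stale = new ArrayList<>();
        for (String blobName : blobs.keySet()) {
            if (blobName.startsWith(prefix)) {
                stale.add(blobName);
            }
        }
        // one call per batch instead of one call per blob; a single IOException now covers the batch
        container.deleteBlobs(stale);
    }
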
@@ -25,6 +25,7 @@ import org.elasticsearch.common.blobstore.BlobMetaData;
import org.elasticsearch.common.blobstore.BlobPath;

import java.io.IOException;
import java.util.Collection;
import java.util.Map;

/**

@@ -50,4 +51,11 @@ public abstract class AbstractBlobContainer implements BlobContainer {
deleteBlob(blob.name());
}
}

@Override
public void deleteBlobs(Collection<String> blobNames) throws IOException {
for(String blob: blobNames) {
deleteBlob(blob);
}
}
}

@@ -22,7 +22,6 @@ package org.elasticsearch.common.io;
import com.google.common.base.Charsets;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.util.Callback;

import java.io.*;

@@ -184,34 +183,6 @@ public abstract class Streams {
return out.toString();
}

public static String copyToStringFromClasspath(ClassLoader classLoader, String path) throws IOException {
InputStream is = classLoader.getResourceAsStream(path);
if (is == null) {
throw new FileNotFoundException("Resource [" + path + "] not found in classpath with class loader [" + classLoader + "]");
}
return copyToString(new InputStreamReader(is, Charsets.UTF_8));
}

public static String copyToStringFromClasspath(String path) throws IOException {
InputStream is = Streams.class.getResourceAsStream(path);
if (is == null) {
throw new FileNotFoundException("Resource [" + path + "] not found in classpath");
}
return copyToString(new InputStreamReader(is, Charsets.UTF_8));
}

public static byte[] copyToBytesFromClasspath(String path) throws IOException {
try (InputStream is = Streams.class.getResourceAsStream(path)) {
if (is == null) {
throw new FileNotFoundException("Resource [" + path + "] not found in classpath");
}
try (BytesStreamOutput out = new BytesStreamOutput()) {
copy(is, out);
return out.bytes().toBytes();
}
}
}

public static int readFully(Reader reader, char[] dest) throws IOException {
return readFully(reader, dest, 0, dest.length);
}

@@ -63,6 +63,7 @@ import org.elasticsearch.indices.TypeMissingException;
import org.elasticsearch.percolator.PercolatorService;
import org.elasticsearch.script.ScriptService;

import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;

@@ -78,7 +79,7 @@ import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
/**
*
*/
public class MapperService extends AbstractIndexComponent {
public class MapperService extends AbstractIndexComponent implements Closeable {

public static final String DEFAULT_MAPPING = "_default_";
private static ObjectHashSet<String> META_FIELDS = ObjectHashSet.from(

@@ -1403,9 +1403,11 @@ public class IndexShard extends AbstractIndexShardComponent {
* Syncs the given location with the underlying storage unless already synced.
*/
public void sync(Translog.Location location) {
final Engine engine = engine();
try {
final Engine engine = engine();
engine.getTranslog().ensureSynced(location);
} catch (EngineClosedException ex) {
// that's fine since we already synced everything on engine close - this also is conform with the methods documentation
} catch (IOException ex) { // if this fails we are in deep shit - fail the request
logger.debug("failed to sync translog", ex);
throw new ElasticsearchException("failed to sync translog", ex);

@@ -352,33 +352,38 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
*/
protected void finalize(List<SnapshotFiles> snapshots, int fileListGeneration, Map<String, BlobMetaData> blobs) {
BlobStoreIndexShardSnapshots newSnapshots = new BlobStoreIndexShardSnapshots(snapshots);
List<String> blobsToDelete = newArrayList();
// delete old index files first
for (String blobName : blobs.keySet()) {
// delete old file lists
if (indexShardSnapshotsFormat.isTempBlobName(blobName) || blobName.startsWith(SNAPSHOT_INDEX_PREFIX)) {
try {
blobContainer.deleteBlob(blobName);
} catch (IOException e) {
// We cannot delete index file - this is fatal, we cannot continue, otherwise we might end up
// with references to non-existing files
throw new IndexShardSnapshotFailedException(shardId, "error deleting index file [{}] during cleanup", e);
}
blobsToDelete.add(blobName);
}
}

try {
blobContainer.deleteBlobs(blobsToDelete);
} catch (IOException e) {
// We cannot delete index file - this is fatal, we cannot continue, otherwise we might end up
// with references to non-existing files
throw new IndexShardSnapshotFailedException(shardId, "error deleting index files during cleanup, reason: " + e.getMessage(), e);
}

blobsToDelete = newArrayList();
// now go over all the blobs, and if they don't exists in a snapshot, delete them
for (String blobName : blobs.keySet()) {
// delete old file lists
// delete unused files
if (blobName.startsWith(DATA_BLOB_PREFIX)) {
if (newSnapshots.findNameFile(FileInfo.canonicalName(blobName)) == null) {
try {
blobContainer.deleteBlob(blobName);
} catch (IOException e) {
logger.debug("[{}] [{}] error deleting blob [{}] during cleanup", e, snapshotId, shardId, blobName);
}
blobsToDelete.add(blobName);
}
}
}
try {
blobContainer.deleteBlobs(blobsToDelete);
} catch (IOException e) {
logger.debug("[{}] [{}] error deleting some of the blobs [{}] during cleanup", e, snapshotId, shardId, blobsToDelete);
}

// If we deleted all snapshots - we don't need to create the index file
if (snapshots.size() > 0) {

@@ -19,11 +19,13 @@

package org.elasticsearch.rest.action.admin.indices.upgrade;

import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;

@@ -86,8 +88,11 @@ public class RestUpgradeAction extends BaseRestHandler {
builder.startObject();
buildBroadcastShardsHeader(builder, request, response);
builder.startObject("upgraded_indices");
for (Map.Entry<String, String> entry : response.versions().entrySet()) {
builder.field(entry.getKey(), entry.getValue(), XContentBuilder.FieldCaseConversion.NONE);
for (Map.Entry<String, Tuple<Version, String>> entry : response.versions().entrySet()) {
builder.startObject(entry.getKey(), XContentBuilder.FieldCaseConversion.NONE);
builder.field("upgrade_version", entry.getValue().v1());
builder.field("oldest_lucene_segment_version", entry.getValue().v2());
builder.endObject();
}
builder.endObject();
builder.endObject();

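Note: with the builder calls above, each upgraded index is now reported as an object rather than a flat string. The response body comes out roughly like this (values are illustrative; the _shards header is produced by buildBroadcastShardsHeader):

    {
      "_shards": { "total": 5, "successful": 5, "failed": 0 },
      "upgraded_indices": {
        "my-index": {
          "upgrade_version": "2.0.0",
          "oldest_lucene_segment_version": "5.2.1"
        }
      }
    }
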
@@ -326,7 +326,7 @@ public class RestIndicesAction extends AbstractCatAction {
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getDocs().getDeleted());

table.addCell(indexMetaData.creationDate());
table.addCell(new DateTime(indexMetaData.creationDate(), DateTimeZone.getDefault()));
table.addCell(new DateTime(indexMetaData.creationDate(), DateTimeZone.UTC));

table.addCell(indexStats == null ? null : indexStats.getTotal().getStore().size());
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getStore().size());

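Note: the one-line change above switches the _cat/indices creation date from the node's default time zone to UTC, so the string no longer varies between nodes. A hedged illustration in plain Joda-Time (the epoch value is made up):

    long creationDate = 1438041600000L;
    DateTime local = new DateTime(creationDate, DateTimeZone.getDefault()); // depends on the node's zone
    DateTime utc = new DateTime(creationDate, DateTimeZone.UTC);            // stable across nodes
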
@@ -207,6 +207,7 @@ public class VersionTests extends ESTestCase {
if (maxBranchVersion == null) {
maxBranchVersions.put(branchName, v);
} else if (v.after(maxBranchVersion)) {

assertFalse("Version " + maxBranchVersion + " cannot be a snapshot because version " + v + " exists", maxBranchVersion.snapshot());
maxBranchVersions.put(branchName, v);
}

@@ -17,7 +17,7 @@
* under the License.
*/

package org.elasticsearch.rest.action.admin.indices.upgrade;
package org.elasticsearch.action.admin.indices.upgrade;

import com.google.common.base.Predicate;
import org.elasticsearch.ExceptionsHelper;

@@ -17,15 +17,17 @@
* under the License.
*/

package org.elasticsearch.rest.action.admin.indices.upgrade;
package org.elasticsearch.action.admin.indices.upgrade;

import org.elasticsearch.Version;
import org.elasticsearch.bwcompat.StaticIndexBackwardCompatibilityIT;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.indices.IndicesService;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.hamcrest.Matchers.containsString;

public class UpgradeReallyOldIndexIT extends StaticIndexBackwardCompatibilityIT {

@@ -38,11 +40,25 @@ public class UpgradeReallyOldIndexIT extends StaticIndexBackwardCompatibilityIT
assertTrue(UpgradeIT.hasAncientSegments(client(), indexName));
assertNoFailures(client().admin().indices().prepareUpgrade(indexName).setUpgradeOnlyAncientSegments(true).get());

assertFalse(UpgradeIT.hasAncientSegments(client(), "index-0.90.6"));
assertFalse(UpgradeIT.hasAncientSegments(client(), indexName));
// This index has only ancient segments, so it should now be fully upgraded:
UpgradeIT.assertUpgraded(client(), indexName);
assertEquals(Version.CURRENT.luceneVersion.toString(), client().admin().indices().prepareGetSettings(indexName).get().getSetting(indexName, IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE));
assertMinVersion(indexName, Version.CURRENT.luceneVersion);

assertEquals(client().admin().indices().prepareGetSettings(indexName).get().getSetting(indexName, IndexMetaData.SETTING_VERSION_UPGRADED), Integer.toString(Version.CURRENT.id));
}

public void testUpgradeConflictingMapping() throws Exception {
String indexName = "index-conflicting-mappings-1.7.0";
logger.info("Checking static index " + indexName);
Settings nodeSettings = prepareBackwardsDataDir(getDataPath(indexName + ".zip"));
try {
internalCluster().startNode(nodeSettings);
fail("Should have failed to start the node");
} catch (Exception ex) {
assertThat(ex.getMessage(), containsString("conflicts with existing mapping in other types"));
}
}

private void assertMinVersion(String index, org.apache.lucene.util.Version version) {

@@ -26,7 +26,7 @@ import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
import org.elasticsearch.test.ESIntegTestCase;
import org.junit.Test;

import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;

public class BulkIntegrationIT extends ESIntegTestCase {

@@ -37,7 +37,7 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.notNullValue;

@@ -20,8 +20,8 @@
package org.elasticsearch.action.fieldstats;

import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.StreamsUtils;

import static org.elasticsearch.action.fieldstats.IndexConstraint.Comparison.*;
import static org.elasticsearch.action.fieldstats.IndexConstraint.Property.MAX;

@@ -31,7 +31,7 @@ import static org.hamcrest.Matchers.equalTo;
public class FieldStatsRequestTest extends ESTestCase {

public void testFieldsParsing() throws Exception {
byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/fieldstats/fieldstats-index-constraints-request.json");
byte[] data = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/fieldstats/fieldstats-index-constraints-request.json");
FieldStatsRequest request = new FieldStatsRequest();
request.source(new BytesArray(data));

@@ -20,7 +20,7 @@ package org.elasticsearch.action.percolate;

import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.test.StreamsUtils;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.test.ESTestCase;
import org.junit.Test;

@@ -35,7 +35,7 @@ public class MultiPercolatorRequestTests extends ESTestCase {

@Test
public void testParseBulkRequests() throws Exception {
byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/percolate/mpercolate1.json");
byte[] data = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/percolate/mpercolate1.json");
MultiPercolateRequest request = new MultiPercolateRequest().add(data, 0, data.length);

assertThat(request.requests().size(), equalTo(8));

@@ -152,7 +152,7 @@ public class MultiPercolatorRequestTests extends ESTestCase {

@Test
public void testParseBulkRequests_defaults() throws Exception {
byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/percolate/mpercolate2.json");
byte[] data = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/percolate/mpercolate2.json");
MultiPercolateRequest request = new MultiPercolateRequest();
request.indices("my-index1").documentType("my-type1").indicesOptions(IndicesOptions.lenientExpandOpen());
request.add(data, 0, data.length);

@@ -20,7 +20,7 @@
package org.elasticsearch.action.search;

import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.test.StreamsUtils;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

@@ -38,7 +38,7 @@ public class MultiSearchRequestTests extends ESTestCase {

@Test
public void simpleAdd() throws Exception {
byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch1.json");
byte[] data = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch1.json");
MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, null, null, null);
assertThat(request.requests().size(), equalTo(8));
assertThat(request.requests().get(0).indices()[0], equalTo("test"));

@@ -64,7 +64,7 @@ public class MultiSearchRequestTests extends ESTestCase {

@Test
public void simpleAdd2() throws Exception {
byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch2.json");
byte[] data = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch2.json");
MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, null, null, null);
assertThat(request.requests().size(), equalTo(5));
assertThat(request.requests().get(0).indices()[0], equalTo("test"));

@@ -82,7 +82,7 @@ public class MultiSearchRequestTests extends ESTestCase {

@Test
public void simpleAdd3() throws Exception {
byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch3.json");
byte[] data = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch3.json");
MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, null, null, null);
assertThat(request.requests().size(), equalTo(4));
assertThat(request.requests().get(0).indices()[0], equalTo("test0"));

@@ -101,7 +101,7 @@ public class MultiSearchRequestTests extends ESTestCase {

@Test
public void simpleAdd4() throws Exception {
byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch4.json");
byte[] data = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch4.json");
MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, null, null, null);
assertThat(request.requests().size(), equalTo(3));
assertThat(request.requests().get(0).indices()[0], equalTo("test0"));

@@ -31,7 +31,6 @@ import org.apache.lucene.store.Directory;
import org.elasticsearch.action.termvectors.TermVectorsRequest.Flag;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.io.stream.InputStreamStreamInput;
import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
import org.elasticsearch.common.xcontent.XContentFactory;

@@ -43,6 +42,7 @@ import org.elasticsearch.index.mapper.core.TypeParsers;
import org.elasticsearch.index.mapper.internal.AllFieldMapper;
import org.elasticsearch.rest.action.termvectors.RestTermVectorsAction;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.StreamsUtils;
import org.hamcrest.Matchers;
import org.junit.Test;

@@ -292,13 +292,13 @@ public class TermVectorsUnitTests extends ESTestCase {

@Test
public void testMultiParser() throws Exception {
byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/termvectors/multiRequest1.json");
byte[] data = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/termvectors/multiRequest1.json");
BytesReference bytes = new BytesArray(data);
MultiTermVectorsRequest request = new MultiTermVectorsRequest();
request.add(new TermVectorsRequest(), bytes);
checkParsedParameters(request);

data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/termvectors/multiRequest2.json");
data = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/termvectors/multiRequest2.json");
bytes = new BytesArray(data);
request = new MultiTermVectorsRequest();
request.add(new TermVectorsRequest(), bytes);

@@ -328,7 +328,7 @@ public class TermVectorsUnitTests extends ESTestCase {

@Test // issue #12311
public void testMultiParserFilter() throws Exception {
byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/termvectors/multiRequest3.json");
byte[] data = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/termvectors/multiRequest3.json");
BytesReference bytes = new BytesArray(data);
MultiTermVectorsRequest request = new MultiTermVectorsRequest();
request.add(new TermVectorsRequest(), bytes);

@@ -26,6 +26,7 @@ import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.get.GetIndexResponse;
import org.elasticsearch.action.admin.indices.upgrade.UpgradeIT;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;

@@ -42,7 +43,6 @@ import org.elasticsearch.index.engine.EngineConfig;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.shard.MergePolicyConfig;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.rest.action.admin.indices.upgrade.UpgradeIT;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;

@@ -21,11 +21,10 @@ package org.elasticsearch.common.cli;

import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.StreamsUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;

import java.io.IOException;
import java.io.PrintWriter;

@@ -161,7 +160,7 @@ public abstract class CliToolTestCase extends ESTestCase {
}
assertThat(nonEmptyLines, hasSize(greaterThan(0)));

String expectedDocs = Streams.copyToStringFromClasspath(classPath);
String expectedDocs = StreamsUtils.copyToStringFromClasspath(classPath);
for (String nonEmptyLine : nonEmptyLines) {
assertThat(expectedDocs, containsString(nonEmptyLine.replaceAll(System.lineSeparator(), "")));
}

@@ -56,15 +56,14 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath;
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.nullValue;

public class SimpleAllMapperTests extends ESSingleNodeTestCase {

@@ -29,8 +29,8 @@ import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.junit.Test;

import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath;
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
import static org.hamcrest.Matchers.equalTo;

/**

@@ -29,8 +29,8 @@ import org.elasticsearch.index.mapper.ParseContext.Document;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.junit.Test;

import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath;
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
import static org.hamcrest.Matchers.equalTo;

/**

@@ -31,8 +31,8 @@ import org.elasticsearch.test.ESSingleNodeTestCase;
import org.hamcrest.Matchers;
import org.junit.Test;

import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath;
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
import static org.hamcrest.Matchers.equalTo;

/**

@@ -47,8 +47,8 @@ import java.util.Collections;
import java.util.Map;
import java.util.TreeMap;

import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath;
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.mapper.MapperBuilders.doc;
import static org.elasticsearch.index.mapper.MapperBuilders.rootObject;

@@ -32,7 +32,7 @@ import org.junit.Test;

import java.util.Arrays;

import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
import static org.hamcrest.Matchers.*;

/**

@@ -25,7 +25,7 @@ import org.junit.Test;

import java.io.IOException;

import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;

@@ -30,8 +30,8 @@ import org.elasticsearch.index.IndexService;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.junit.Test;

import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath;
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
import static org.elasticsearch.index.mapper.MapperBuilders.*;
import static org.hamcrest.Matchers.equalTo;

@@ -30,7 +30,7 @@ import org.junit.Test;

import java.util.HashMap;

import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.containsString;

@@ -36,7 +36,7 @@ import org.junit.Test;
import java.io.IOException;
import java.util.LinkedHashMap;

import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
import static org.hamcrest.CoreMatchers.equalTo;

@@ -38,8 +38,8 @@ import org.junit.Test;

import java.io.IOException;

import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath;
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;

@@ -38,8 +38,8 @@ import org.junit.Test;

import java.io.IOException;

import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath;
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.lessThanOrEqualTo;

@@ -73,8 +73,8 @@ import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;

import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath;
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.*;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBooleanSubQuery;

@@ -26,7 +26,7 @@ import org.junit.Test;

import java.io.IOException;

import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
import static org.hamcrest.CoreMatchers.equalTo;

@ -55,7 +55,7 @@ import java.io.IOException;
|
|||
import java.util.*;
|
||||
|
||||
import static com.google.common.collect.Maps.newHashMap;
|
||||
import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
|
||||
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
|
||||
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
|
||||
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
|
||||
import static org.elasticsearch.index.query.QueryBuilders.*;
|
||||
|
|
|
@ -37,7 +37,7 @@ import java.io.IOException;
|
|||
import java.util.List;
|
||||
import java.util.Random;
|
||||
|
||||
import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
|
||||
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
|
||||
public class ItemSerializationTests extends ESTestCase {
|
||||
|
|
|
@ -26,6 +26,7 @@ import org.elasticsearch.common.blobstore.BlobPath;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Collection;
import java.util.Map;

/**

@ -63,6 +64,11 @@ public class BlobContainerWrapper implements BlobContainer {
delegate.deleteBlob(blobName);
}

@Override
public void deleteBlobs(Collection<String> blobNames) throws IOException {
delegate.deleteBlobs(blobNames);
}

@Override
public void deleteBlobsByPrefix(String blobNamePrefix) throws IOException {
delegate.deleteBlobsByPrefix(blobNamePrefix);
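Note: the new deleteBlobs override above only forwards to the wrapped container. A minimal sketch of how a caller could use the bulk variant, assuming BlobContainer exposes deleteBlobs(Collection<String>) as added in this change; the helper class and blob names below are illustrative, not part of the commit.

import org.elasticsearch.common.blobstore.BlobContainer;

import java.io.IOException;
import java.util.Arrays;

public class BulkBlobDeleteExample {
    // Deletes several blobs in one call instead of looping over deleteBlob();
    // the blob names are made up for the example.
    public static void cleanUp(BlobContainer container) throws IOException {
        container.deleteBlobs(Arrays.asList("snap-1.dat", "snap-2.dat"));
    }
}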
@ -0,0 +1,61 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.test;

import com.google.common.base.Charsets;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.io.stream.BytesStreamOutput;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;

public class StreamsUtils {

    public static String copyToStringFromClasspath(ClassLoader classLoader, String path) throws IOException {
        InputStream is = classLoader.getResourceAsStream(path);
        if (is == null) {
            throw new FileNotFoundException("Resource [" + path + "] not found in classpath with class loader [" + classLoader + "]");
        }
        return Streams.copyToString(new InputStreamReader(is, Charsets.UTF_8));
    }

    public static String copyToStringFromClasspath(String path) throws IOException {
        InputStream is = Streams.class.getResourceAsStream(path);
        if (is == null) {
            throw new FileNotFoundException("Resource [" + path + "] not found in classpath");
        }
        return Streams.copyToString(new InputStreamReader(is, Charsets.UTF_8));
    }

    public static byte[] copyToBytesFromClasspath(String path) throws IOException {
        try (InputStream is = Streams.class.getResourceAsStream(path)) {
            if (is == null) {
                throw new FileNotFoundException("Resource [" + path + "] not found in classpath");
            }
            try (BytesStreamOutput out = new BytesStreamOutput()) {
                Streams.copy(is, out);
                return out.bytes().toBytes();
            }
        }
    }

}
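Note: StreamsUtils above is the test-scoped replacement for the removed Streams classpath helpers (hence the import swaps earlier in this diff). A minimal usage sketch, assuming a JSON fixture on the test classpath; the class name and resource path are illustrative, not part of the commit.

import org.elasticsearch.test.StreamsUtils;

import java.io.IOException;

public class StreamsUtilsUsageExample {
    // Reads a fixture bundled on the test classpath into a UTF-8 String.
    public static String loadFixture() throws IOException {
        return StreamsUtils.copyToStringFromClasspath("/org/elasticsearch/test/example-fixture.json");
    }
}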
Binary file not shown.
@ -251,27 +251,20 @@ def build_release(release_version, run_tests=False, dry_run=True, cpus=1, bwc_ve
print('Running Backwards compatibility tests against version [%s]' % (bwc_version))
run_mvn('clean', 'test -Dtests.filter=@backwards -Dtests.bwc.version=%s -Dtests.bwc=true -Dtests.jvms=1' % bwc_version)
run_mvn('clean test-compile -Dforbidden.test.signatures="org.apache.lucene.util.LuceneTestCase\$AwaitsFix @ Please fix all bugs before release"')
gpg_args = '-Dgpg.key="%s" -Dgpg.passphrase="%s" -Ddeb.sign=true -Drpm.sign=true' % (env.get('GPG_KEY_ID'), env.get('GPG_PASSPHRASE'))
# dont sign the RPM, so older distros will be able to use the uploaded RPM package
gpg_args = '-Dgpg.key="%s" -Dgpg.passphrase="%s" -Ddeb.sign=true -Drpm.sign=false' % (env.get('GPG_KEY_ID'), env.get('GPG_PASSPHRASE'))
if env.get('GPG_KEYRING'):
gpg_args += ' -Dgpg.keyring="%s"' % env.get('GPG_KEYRING')
run_mvn('clean %s -DskipTests %s' % (target, gpg_args))
success = False
try:
# create unsigned RPM first for downloads.elasticsearch.org
run_mvn('-DskipTests rpm:rpm')
# move unsigned RPM to target/releases
# this is an oddness of RPM that is attaches -1 so we have to rename it
rpm = os.path.join('target/rpm/elasticsearch/RPMS/noarch/', 'elasticsearch-%s-1.noarch.rpm' % release_version)
# create additional signed RPM for the repositories
run_mvn('-f distribution/rpm/pom.xml package -DskipTests -Dsign.rpm=true -Drpm.outputDirectory=target/releases/signed/ %s' % (gpg_args))
rpm = os.path.join('target/releases/signed', 'elasticsearch-%s.rpm' % release_version)
if os.path.isfile(rpm):
log('RPM [%s] contains: ' % rpm)
log('Signed RPM [%s] contains: ' % rpm)
run('rpm -pqli %s' % rpm)
renamed_rpm = os.path.join('target/releases/', 'elasticsearch-%s.noarch.rpm' % release_version)
shutil.move(rpm, renamed_rpm)
else:
raise RuntimeError('Could not find required RPM at %s' % rpm)
# now create signed RPM for repositories
run_mvn('-DskipTests rpm:rpm %s' % (gpg_args))
success = True
success = True
finally:
if not success:
print("""
@ -358,63 +351,44 @@ def find_release_version(src_branch):
return match.group(1)
raise RuntimeError('Could not find release version in branch %s' % src_branch)

def artifact_names(release, path = ''):
artifacts = [os.path.join(path, 'elasticsearch-%s.%s' % (release, t)) for t in ['deb', 'tar.gz', 'zip']]
artifacts.append(os.path.join(path, 'elasticsearch-%s.noarch.rpm' % (release)))
def artifact_names(release):
artifacts = []
artifacts.append(os.path.join('distribution/zip/target/releases', 'elasticsearch-%s.zip' % (release)))
artifacts.append(os.path.join('distribution/tar/target/releases', 'elasticsearch-%s.tar.gz' % (release)))
artifacts.append(os.path.join('distribution/deb/target/releases', 'elasticsearch-%s.deb' % (release)))
artifacts.append(os.path.join('distribution/rpm/target/releases', 'elasticsearch-%s.rpm' % (release)))
return artifacts

def get_artifacts(release):
common_artifacts = artifact_names(release, 'target/releases/')
common_artifacts = artifact_names(release)
for f in common_artifacts:
if not os.path.isfile(f):
raise RuntimeError('Could not find required artifact at %s' % f)
return common_artifacts

# Checks the jar files in each package
# Barfs if any of the package jar files differ
def check_artifacts_for_same_jars(artifacts):
jars = []
for file in artifacts:
if file.endswith('.zip'):
jars.append(subprocess.check_output("unzip -l %s | grep '\.jar$' | awk -F '/' '{ print $NF }' | sort" % file, shell=True))
if file.endswith('.tar.gz'):
jars.append(subprocess.check_output("tar tzvf %s | grep '\.jar$' | awk -F '/' '{ print $NF }' | sort" % file, shell=True))
if file.endswith('.rpm'):
jars.append(subprocess.check_output("rpm -pqli %s | grep '\.jar$' | awk -F '/' '{ print $NF }' | sort" % file, shell=True))
if file.endswith('.deb'):
jars.append(subprocess.check_output("dpkg -c %s | grep '\.jar$' | awk -F '/' '{ print $NF }' | sort" % file, shell=True))
if len(set(jars)) != 1:
raise RuntimeError('JAR contents of packages are not the same, please check the package contents. Use [unzip -l], [tar tzvf], [dpkg -c], [rpm -pqli] to inspect')

# Generates sha1 checsums for all files
# and returns the checksum files as well
# as the given files in a list
def generate_checksums(files):
res = []
for release_file in files:
directory = os.path.dirname(release_file)
file = os.path.basename(release_file)
checksum_file = '%s.sha1.txt' % file

if os.system('cd %s; shasum %s > %s' % (directory, file, checksum_file)):
raise RuntimeError('Failed to generate checksum for file %s' % release_file)
res = res + [os.path.join(directory, checksum_file), release_file]
return res

def download_and_verify(release, files, plugins=None, base_url='https://download.elastic.co/elasticsearch/elasticsearch'):
# Sample URL:
# http://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/elasticsearch-rpm/2.0.0-beta1-SNAPSHOT/elasticsearch-rpm-2.0.0-beta1-SNAPSHOT.rpm
def download_and_verify(release, files, plugins=None, base_url='https://download.elastic.co/elasticsearch/release/org/elasticsearch/distribution'):
print('Downloading and verifying release %s from %s' % (release, base_url))
tmp_dir = tempfile.mkdtemp()
try:
downloaded_files = []
for file in files:
name = os.path.basename(file)
url = '%s/%s' % (base_url, name)
if name.endswith('tar.gz'):
url = '%s/tar/elasticsearch/%s/%s' % (base_url, release, name)
elif name.endswith('zip'):
url = '%s/zip/elasticsearch/%s/%s' % (base_url, release, name)
elif name.endswith('rpm'):
url = '%s/rpm/elasticsearch/%s/%s' % (base_url, release, name)
elif name.endswith('deb'):
url = '%s/deb/elasticsearch/%s/%s' % (base_url, release, name)
abs_file_path = os.path.join(tmp_dir, name)
print(' Downloading %s' % (url))
downloaded_files.append(abs_file_path)
urllib.request.urlretrieve(url, abs_file_path)
url = ''.join([url, '.sha1.txt'])
checksum_file = os.path.join(tmp_dir, ''.join([abs_file_path, '.sha1.txt']))
url = ''.join([url, '.sha1'])
checksum_file = os.path.join(tmp_dir, ''.join([abs_file_path, '.sha1']))
urllib.request.urlretrieve(url, checksum_file)
print(' Verifying checksum %s' % (checksum_file))
run('cd %s && sha1sum -c %s' % (tmp_dir, os.path.basename(checksum_file)))
@ -444,10 +418,7 @@ def smoke_test_release(release, files, expected_hash, plugins):
run('%s; %s install %s' % (java_exe(), es_plugin_path, plugin))
plugin_names[name] = True

if release.startswith("0.90."):
background = '' # 0.90.x starts in background automatically
else:
background = '-d'
background = '-d'
print(' Starting elasticsearch deamon from [%s]' % os.path.join(tmp_dir, 'elasticsearch-%s' % release))
run('%s; %s -Des.node.name=smoke_tester -Des.cluster.name=prepare_release -Des.discovery.zen.ping.multicast.enabled=false -Des.script.inline=on -Des.script.indexed=on %s'
% (java_exe(), es_run_path, background))
@ -505,21 +476,11 @@ def merge_tag_push(remote, src_branch, release_version, dry_run):
else:
print(' dryrun [True] -- skipping push to remote %s' % remote)

def publish_artifacts(artifacts, base='elasticsearch/elasticsearch', dry_run=True):
location = os.path.dirname(os.path.realpath(__file__))
for artifact in artifacts:
if dry_run:
print('Skip Uploading %s to Amazon S3' % artifact)
else:
print('Uploading %s to Amazon S3' % artifact)
# requires boto to be installed but it is not available on python3k yet so we use a dedicated tool
run('python %s/upload-s3.py --file %s ' % (location, os.path.abspath(artifact)))

def publish_repositories(version, dry_run=True):
if dry_run:
print('Skipping package repository update')
else:
print('Triggering repository update - calling dev-tools/build_repositories.sh %s' % version)
print('Triggering repository update for version %s - calling dev-tools/build_repositories.sh %s' % (version, src_branch))
# src_branch is a version like 1.5/1.6/2.0/etc.. so we can use this
run('dev-tools/build_repositories.sh %s' % src_branch)
@ -756,22 +717,17 @@ if __name__ == '__main__':
print('Building Release candidate')
input('Press Enter to continue...')
if not dry_run:
print(' Running maven builds now and publish to Sonatype - run-tests [%s]' % run_tests)
print(' Running maven builds now and publish to Sonatype and S3 - run-tests [%s]' % run_tests)
else:
print(' Running maven builds now run-tests [%s]' % run_tests)
build_release(release_version, run_tests=run_tests, dry_run=dry_run, cpus=cpus, bwc_version=find_bwc_version(release_version, bwc_path))
artifacts = get_artifacts(release_version)
print('Checking if all artifacts contain the same jars')
check_artifacts_for_same_jars(artifacts)
artifacts_and_checksum = generate_checksums(artifacts)
smoke_test_release(release_version, artifacts, get_head_hash(), PLUGINS)
print(''.join(['-' for _ in range(80)]))
print('Finish Release -- dry_run: %s' % dry_run)
input('Press Enter to continue...')
print(' merge release branch, tag and push to %s %s -- dry_run: %s' % (remote, src_branch, dry_run))
merge_tag_push(remote, src_branch, release_version, dry_run)
print(' publish artifacts to S3 -- dry_run: %s' % dry_run)
publish_artifacts(artifacts_and_checksum, dry_run=dry_run)
print(' Updating package repositories -- dry_run: %s' % dry_run)
publish_repositories(src_branch, dry_run=dry_run)
cherry_pick_command = '.'
@ -158,8 +158,8 @@ mkdir -p $centosdir
echo "RPM: Syncing repository for version $version into $centosdir"
$s3cmd sync s3://$S3_BUCKET_SYNC_FROM/elasticsearch/$version/centos/ $centosdir

rpm=target/rpm/elasticsearch/RPMS/noarch/elasticsearch*.rpm
echo "RPM: Copying $rpm into $centosdor"
rpm=distribution/rpm/target/releases/signed/elasticsearch*.rpm
echo "RPM: Copying signed $rpm into $centosdir"
cp $rpm $centosdir

echo "RPM: Running createrepo in $centosdir"

@ -176,7 +176,7 @@ $s3cmd sync -P $centosdir/ s3://$S3_BUCKET_SYNC_TO/elasticsearch/$version/centos
## DEB
###################

deb=target/releases/elasticsearch*.deb
deb=distribution/deb/target/releases/elasticsearch*.deb

echo "DEB: Creating repository directory structure"
@ -0,0 +1,93 @@
import create_bwc_index
import logging
import os
import random
import shutil
import subprocess
import sys
import tempfile

def fetch_version(version):
  logging.info('fetching ES version %s' % version)
  if subprocess.call([sys.executable, os.path.join(os.path.split(sys.argv[0])[0], 'get-bwc-version.py'), version]) != 0:
    raise RuntimeError('failed to download ES version %s' % version)

def main():
  '''
  Creates a static back compat index (.zip) with conflicting mappings.
  '''

  logging.basicConfig(format='[%(levelname)s] [%(asctime)s] %(message)s', level=logging.INFO,
                      datefmt='%Y-%m-%d %I:%M:%S %p')
  logging.getLogger('elasticsearch').setLevel(logging.ERROR)
  logging.getLogger('urllib3').setLevel(logging.WARN)

  tmp_dir = tempfile.mkdtemp()
  try:
    data_dir = os.path.join(tmp_dir, 'data')
    repo_dir = os.path.join(tmp_dir, 'repo')
    logging.info('Temp data dir: %s' % data_dir)
    logging.info('Temp repo dir: %s' % repo_dir)

    version = '1.7.0'
    classifier = 'conflicting-mappings-%s' % version
    index_name = 'index-%s' % classifier

    # Download old ES releases if necessary:
    release_dir = os.path.join('backwards', 'elasticsearch-%s' % version)
    if not os.path.exists(release_dir):
      fetch_version(version)

    node = create_bwc_index.start_node(version, release_dir, data_dir, repo_dir, cluster_name=index_name)
    client = create_bwc_index.create_client()

    put_conflicting_mappings(client, index_name)
    create_bwc_index.shutdown_node(node)
    print('%s server output:\n%s' % (version, node.stdout.read().decode('utf-8')))
    node = None
    create_bwc_index.compress_index(classifier, tmp_dir, 'core/src/test/resources/org/elasticsearch/action/admin/indices/upgrade')
  finally:
    if node is not None:
      create_bwc_index.shutdown_node(node)
    shutil.rmtree(tmp_dir)

def put_conflicting_mappings(client, index_name):
  client.indices.delete(index=index_name, ignore=404)
  logging.info('Create single shard test index')

  mappings = {}
  # backwardcompat test for conflicting mappings, see #11857
  mappings['x'] = {
    'analyzer': 'standard',
    "properties": {
      "foo": {
        "type": "string"
      }
    }
  }
  mappings['y'] = {
    'analyzer': 'standard',
    "properties": {
      "foo": {
        "type": "date"
      }
    }
  }

  client.indices.create(index=index_name, body={
    'settings': {
      'number_of_shards': 1,
      'number_of_replicas': 0
    },
    'mappings': mappings
  })
  health = client.cluster.health(wait_for_status='green', wait_for_relocating_shards=0)
  assert health['timed_out'] == False, 'cluster health timed out %s' % health
  num_docs = random.randint(2000, 3000)
  create_bwc_index.index_documents(client, index_name, 'doc', num_docs)
  logging.info('Running basic asserts on the data added')
  create_bwc_index.run_basic_asserts(client, index_name, 'doc', num_docs)

if __name__ == '__main__':
  main()
@ -25,7 +25,9 @@ def main():
|
|||
tmp_dir = tempfile.mkdtemp()
|
||||
try:
|
||||
data_dir = os.path.join(tmp_dir, 'data')
|
||||
repo_dir = os.path.join(tmp_dir, 'repo')
|
||||
logging.info('Temp data dir: %s' % data_dir)
|
||||
logging.info('Temp repo dir: %s' % repo_dir)
|
||||
|
||||
first_version = '0.20.6'
|
||||
second_version = '0.90.6'
|
||||
|
@ -36,7 +38,7 @@ def main():
|
|||
if not os.path.exists(release_dir):
|
||||
fetch_version(first_version)
|
||||
|
||||
node = create_bwc_index.start_node(first_version, release_dir, data_dir, cluster_name=index_name)
|
||||
node = create_bwc_index.start_node(first_version, release_dir, data_dir, repo_dir, cluster_name=index_name)
|
||||
client = create_bwc_index.create_client()
|
||||
|
||||
# Creates the index & indexes docs w/ first_version:
|
||||
|
@ -63,7 +65,7 @@ def main():
|
|||
fetch_version(second_version)
|
||||
|
||||
# Now also index docs with second_version:
|
||||
node = create_bwc_index.start_node(second_version, release_dir, data_dir, cluster_name=index_name)
|
||||
node = create_bwc_index.start_node(second_version, release_dir, data_dir, repo_dir, cluster_name=index_name)
|
||||
client = create_bwc_index.create_client()
|
||||
|
||||
# If we index too many docs, the random refresh/flush causes the ancient segments to be merged away:
|
||||
|
@ -102,7 +104,7 @@ def main():
|
|||
create_bwc_index.shutdown_node(node)
|
||||
print('%s server output:\n%s' % (second_version, node.stdout.read().decode('utf-8')))
|
||||
node = None
|
||||
create_bwc_index.compress_index('%s-and-%s' % (first_version, second_version), tmp_dir, 'src/test/resources/org/elasticsearch/rest/action/admin/indices/upgrade')
|
||||
create_bwc_index.compress_index('%s-and-%s' % (first_version, second_version), tmp_dir, 'core/src/test/resources/org/elasticsearch/action/admin/indices/upgrade')
|
||||
finally:
|
||||
if node is not None:
|
||||
create_bwc_index.shutdown_node(node)
|
||||
|
|
|
@ -12,10 +12,14 @@
|
|||
<!-- if this exists, ES is running (maybe) -->
|
||||
<available property="integ.pidfile.exists" file="${integ.pidfile}"/>
|
||||
|
||||
<!-- name of our cluster, maybe needs changing -->
|
||||
<property name="integ.cluster.name" value="prepare_release"/>
|
||||
|
||||
<!-- arguments passed to elasticsearch when running -->
|
||||
<property name="integ.args"
|
||||
value="-Des.node.name=smoke_tester -Des.cluster.name=prepare_release
|
||||
value="-Des.node.name=smoke_tester -Des.cluster.name=${integ.cluster.name}
|
||||
-Des.discovery.zen.ping.multicast.enabled=false -Des.script.inline=on
|
||||
-Des.http.port=${integ.http.port} -Des.transport.tcp.port=${integ.transport.port}
|
||||
-Des.script.indexed=on -Des.pidfile=${integ.pidfile} -Des.repositories.url.allowed_urls=http://snapshot.test*"/>
|
||||
|
||||
<!-- runs an OS script -->
|
||||
|
@ -119,12 +123,24 @@
|
|||
<sequential>
|
||||
<echo>Starting up external cluster...</echo>
|
||||
<run-script script="@{home}/bin/elasticsearch" spawn="true"
|
||||
args="@{args} -Des.path.repo=@{home}/repo" />
|
||||
args="@{args} -Des.path.repo=@{home}/repo"/>
|
||||
|
||||
<waitfor maxwait="3" maxwaitunit="minute" checkevery="500">
|
||||
<http url="http://127.0.0.1:9200"/>
|
||||
<local name="failed.to.start"/>
|
||||
<waitfor maxwait="30" maxwaitunit="second"
|
||||
checkevery="500" checkeveryunit="millisecond"
|
||||
timeoutproperty="failed.to.start">
|
||||
<http url="http://127.0.0.1:${integ.http.port}"/>
|
||||
</waitfor>
|
||||
|
||||
<!-- best effort, print console log. useful if it fails especially -->
|
||||
<local name="log.contents"/>
|
||||
<loadfile srcFile="@{home}/logs/${integ.cluster.name}.log"
|
||||
property="log.contents"
|
||||
failonerror="false"/>
|
||||
<echo message="${log.contents}"/>
|
||||
|
||||
<fail message="ES instance did not start" if="failed.to.start"/>
|
||||
|
||||
<local name="integ.pid"/>
|
||||
<extract-pid property="integ.pid"/>
|
||||
<echo>External cluster started PID ${integ.pid}</echo>
|
||||
|
@ -143,9 +159,9 @@
|
|||
<!-- TODO: doesn't belong here, but we will figure it out -->
|
||||
<target name="start-foreground" depends="stop-external-cluster">
|
||||
<delete dir="${integ.scratch}"/>
|
||||
<unzip src="${project.build.directory}/releases/elasticsearch-${project.version}.zip" dest="${integ.scratch}"/>
|
||||
<unzip src="${project.build.directory}/releases/${project.artifactId}-${project.version}.zip" dest="${integ.scratch}"/>
|
||||
<local name="home"/>
|
||||
<property name="home" location="${integ.scratch}/elasticsearch-${elasticsearch.version}"/>
|
||||
<property name="home" location="${integ.scratch}/${project.artifactId}-${elasticsearch.version}"/>
|
||||
<run-script script="${home}/bin/elasticsearch" spawn="false"
|
||||
args="${integ.args} -Des.path.repo=${home}/repo">
|
||||
<nested>
|
||||
|
@ -185,7 +201,7 @@
|
|||
<target name="setup-workspace-zip" depends="stop-external-cluster" unless="${shouldskip}">
|
||||
<sequential>
|
||||
<delete dir="${integ.scratch}"/>
|
||||
<unzip src="${project.build.directory}/releases/elasticsearch-${project.version}.zip"
|
||||
<unzip src="${project.build.directory}/releases/${project.artifactId}-${project.version}.zip"
|
||||
dest="${integ.scratch}"/>
|
||||
</sequential>
|
||||
</target>
|
||||
|
@ -200,7 +216,7 @@
|
|||
<target name="setup-workspace-tar" depends="stop-external-cluster" unless="${shouldskip}">
|
||||
<sequential>
|
||||
<delete dir="${integ.scratch}"/>
|
||||
<untar src="${project.build.directory}/releases/elasticsearch-${project.version}.tar.gz"
|
||||
<untar src="${project.build.directory}/releases/${project.artifactId}-${project.version}.tar.gz"
|
||||
dest="${integ.scratch}"
|
||||
compression="gzip"/>
|
||||
</sequential>
|
||||
|
@ -217,7 +233,7 @@
|
|||
<delete dir="${integ.scratch}"/>
|
||||
<mkdir dir="${integ.scratch}/deb-extracted"/>
|
||||
<local name="debfile"/>
|
||||
<property name="debfile" location="${project.build.directory}/releases/elasticsearch-${project.version}.deb"/>
|
||||
<property name="debfile" location="${project.build.directory}/releases/${project.artifactId}-${project.version}.deb"/>
|
||||
<!-- print some basic package info -->
|
||||
<exec executable="dpkg-deb" failonerror="true" taskname="deb-info">
|
||||
<arg value="-I"/>
|
||||
|
@ -244,7 +260,7 @@
|
|||
<local name="rpm.file"/>
|
||||
<local name="rpm.database"/>
|
||||
<local name="rpm.extracted"/>
|
||||
<property name="rpm.file" location="${project.build.directory}/releases/elasticsearch-${project.version}.rpm"/>
|
||||
<property name="rpm.file" location="${project.build.directory}/releases/${project.artifactId}-${project.version}.rpm"/>
|
||||
<property name="rpm.database" location="${integ.scratch}/rpm-database"/>
|
||||
<property name="rpm.extracted" location="${integ.scratch}/rpm-extracted"/>
|
||||
<mkdir dir="${rpm.database}"/>
|
||||
|
|
|
@ -46,13 +46,14 @@ java.nio.file.FileSystems#getDefault() @ use PathUtils.getDefault instead.
java.nio.file.Files#createTempDirectory(java.lang.String,java.nio.file.attribute.FileAttribute[])
java.nio.file.Files#createTempFile(java.lang.String,java.lang.String,java.nio.file.attribute.FileAttribute[])

@defaultMessage Constructing a DateTime without a time zone is dangerous - use DateTime(DateTimeZone.getDefault()) if you really want the default timezone
@defaultMessage Constructing a DateTime without a time zone is dangerous
org.joda.time.DateTime#<init>()
org.joda.time.DateTime#<init>(long)
org.joda.time.DateTime#<init>(int, int, int, int, int)
org.joda.time.DateTime#<init>(int, int, int, int, int, int)
org.joda.time.DateTime#<init>(int, int, int, int, int, int, int)
org.joda.time.DateTime#now()
org.joda.time.DateTimeZone#getDefault()

com.google.common.collect.Iterators#emptyIterator() @ Use Collections.emptyIterator instead
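Note: the tightened forbidden-signatures entries above ban the zone-less Joda DateTime constructors, DateTime.now() and DateTimeZone.getDefault(). A minimal sketch of the pattern that remains allowed, passing the zone explicitly; the class name is illustrative, not part of the commit.

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

public class ExplicitZoneExample {
    // Allowed: the time zone is stated explicitly instead of relying on the JVM default.
    public static DateTime nowInUtc() {
        return new DateTime(DateTimeZone.UTC);
    }
}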
@ -2,41 +2,33 @@
|
|||
|
||||
use strict;
|
||||
use warnings;
|
||||
use v5.10;
|
||||
|
||||
use FindBin qw($RealBin);
|
||||
use lib "$RealBin/lib";
|
||||
use Archive::Ar();
|
||||
use Cwd();
|
||||
use File::Spec();
|
||||
use Digest::SHA qw(sha1);
|
||||
use File::Temp();
|
||||
use File::Find();
|
||||
use Digest::SHA qw(sha1);
|
||||
use File::Basename qw(basename);
|
||||
use Archive::Extract();
|
||||
$Archive::Extract::PREFER_BIN = 1;
|
||||
|
||||
our %Extract_Package = (
|
||||
zip => \&extract_zip,
|
||||
gz => \&extract_tar_gz,
|
||||
rpm => \&extract_rpm,
|
||||
deb => \&extract_deb
|
||||
);
|
||||
|
||||
my $mode = shift(@ARGV) || "";
|
||||
die usage() unless $mode =~ /^--(check|update)$/;
|
||||
|
||||
my $License_Dir = shift(@ARGV) || die usage();
|
||||
my $Package = shift(@ARGV) || die usage();
|
||||
my $Source = shift(@ARGV) || die usage();
|
||||
$License_Dir = File::Spec->rel2abs($License_Dir) . '/';
|
||||
$Package = File::Spec->rel2abs($Package);
|
||||
$Source = File::Spec->rel2abs($Source);
|
||||
|
||||
die "License dir is not a directory: $License_Dir\n" . usage()
|
||||
unless -d $License_Dir;
|
||||
|
||||
die "Package is not a file: $Package\n" . usage()
|
||||
unless -f $Package;
|
||||
my %shas
|
||||
= -f $Source ? jars_from_zip($Source)
|
||||
: -d $Source ? jars_from_dir($Source)
|
||||
: die "Source is neither a directory nor a zip file: $Source" . usage();
|
||||
|
||||
my %shas = get_shas_from_package($Package);
|
||||
$mode eq '--check'
|
||||
? exit check_shas_and_licenses(%shas)
|
||||
: exit write_shas(%shas);
|
||||
|
@ -56,15 +48,15 @@ sub check_shas_and_licenses {
|
|||
for my $jar ( sort keys %new ) {
|
||||
my $old_sha = delete $old{$jar};
|
||||
unless ($old_sha) {
|
||||
say STDERR "$jar: SHA is missing";
|
||||
print STDERR "$jar: SHA is missing\n";
|
||||
$error++;
|
||||
$sha_error++;
|
||||
next;
|
||||
}
|
||||
|
||||
unless ( $old_sha eq $new{$jar} ) {
|
||||
say STDERR
|
||||
"$jar: SHA has changed, expected $old_sha but found $new{$jar}";
|
||||
print STDERR
|
||||
"$jar: SHA has changed, expected $old_sha but found $new{$jar}\n";
|
||||
$error++;
|
||||
$sha_error++;
|
||||
next;
|
||||
|
@ -92,43 +84,49 @@ sub check_shas_and_licenses {
|
|||
}
|
||||
}
|
||||
unless ($license_found) {
|
||||
say STDERR "$jar: LICENSE is missing";
|
||||
print STDERR "$jar: LICENSE is missing\n";
|
||||
$error++;
|
||||
$sha_error++;
|
||||
}
|
||||
unless ($notice_found) {
|
||||
say STDERR "$jar: NOTICE is missing";
|
||||
print STDERR "$jar: NOTICE is missing\n";
|
||||
$error++;
|
||||
}
|
||||
}
|
||||
|
||||
if ( keys %old ) {
|
||||
say STDERR "Extra SHA files present for: " . join ", ", sort keys %old;
|
||||
print STDERR "Extra SHA files present for: " . join ", ",
|
||||
sort keys %old;
|
||||
print "\n";
|
||||
$error++;
|
||||
}
|
||||
|
||||
my @unused_licenses = grep { !$licenses{$_} } keys %licenses;
|
||||
if (@unused_licenses) {
|
||||
say STDERR "Extra LICENCE file present: " . join ", ",
|
||||
$error++;
|
||||
print STDERR "Extra LICENCE file present: " . join ", ",
|
||||
sort @unused_licenses;
|
||||
print "\n";
|
||||
}
|
||||
|
||||
my @unused_notices = grep { !$notices{$_} } keys %notices;
|
||||
if (@unused_notices) {
|
||||
say STDERR "Extra NOTICE file present: " . join ", ",
|
||||
$error++;
|
||||
print STDERR "Extra NOTICE file present: " . join ", ",
|
||||
sort @unused_notices;
|
||||
print "\n";
|
||||
}
|
||||
|
||||
if ($sha_error) {
|
||||
say STDERR <<"SHAS"
|
||||
print STDERR <<"SHAS"
|
||||
|
||||
You can update the SHA files by running:
|
||||
|
||||
$0 --update $License_Dir $Package
|
||||
$0 --update $License_Dir $Source
|
||||
|
||||
SHAS
|
||||
}
|
||||
say "All SHAs and licenses OK" unless $error;
|
||||
print("All SHAs and licenses OK\n") unless $error;
|
||||
return $error;
|
||||
}
|
||||
|
||||
|
@ -141,13 +139,13 @@ sub write_shas {
|
|||
for my $jar ( sort keys %new ) {
|
||||
if ( $old{$jar} ) {
|
||||
next if $old{$jar} eq $new{$jar};
|
||||
say "Updating $jar";
|
||||
print "Updating $jar\n";
|
||||
}
|
||||
else {
|
||||
say "Adding $jar";
|
||||
print "Adding $jar\n";
|
||||
}
|
||||
open my $fh, '>', $License_Dir . $jar or die $!;
|
||||
say $fh $new{$jar} or die $!;
|
||||
print $fh $new{$jar} . "\n" or die $!;
|
||||
close $fh or die $!;
|
||||
}
|
||||
continue {
|
||||
|
@ -155,10 +153,10 @@ sub write_shas {
|
|||
}
|
||||
|
||||
for my $jar ( sort keys %old ) {
|
||||
say "Deleting $jar";
|
||||
print "Deleting $jar\n";
|
||||
unlink $License_Dir . $jar or die $!;
|
||||
}
|
||||
say "SHAs updated";
|
||||
print "SHAs updated\n";
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -194,82 +192,37 @@ sub get_sha_files {
|
|||
}
|
||||
|
||||
#===================================
|
||||
sub get_shas_from_package {
|
||||
sub jars_from_zip {
|
||||
#===================================
|
||||
my $package = shift;
|
||||
my ($type) = ( $package =~ /\.(\w+)$/ );
|
||||
die "Unrecognised package type: $package"
|
||||
unless $type && $Extract_Package{$type};
|
||||
|
||||
my ($source) = @_;
|
||||
my $temp_dir = File::Temp->newdir;
|
||||
my $files
|
||||
= eval { $Extract_Package{$type}->( $package, $temp_dir->dirname ) }
|
||||
or die "Couldn't extract $package: $@";
|
||||
|
||||
my @jars = map {"$temp_dir/$_"}
|
||||
grep { /\.jar$/ && !/elasticsearch[^\/]*$/ } @$files;
|
||||
my $dir_name = $temp_dir->dirname;
|
||||
my $archive = Archive::Extract->new( archive => $source, type => 'zip' );
|
||||
$archive->extract( to => $dir_name ) || die $archive->error;
|
||||
my @jars = map { File::Spec->rel2abs( $_, $dir_name ) }
|
||||
grep { /\.jar$/ && !/elasticsearch[^\/]*$/ } @{ $archive->files };
|
||||
die "No JARS found in: $source\n"
|
||||
unless @jars;
|
||||
return calculate_shas(@jars);
|
||||
}
|
||||
|
||||
#===================================
|
||||
sub extract_zip {
|
||||
sub jars_from_dir {
|
||||
#===================================
|
||||
my ( $package, $dir ) = @_;
|
||||
my $archive = Archive::Extract->new( archive => $package, type => 'zip' );
|
||||
$archive->extract( to => $dir ) || die $archive->error;
|
||||
return $archive->files;
|
||||
}
|
||||
|
||||
#===================================
|
||||
sub extract_tar_gz {
|
||||
#===================================
|
||||
my ( $package, $dir ) = @_;
|
||||
my $archive = Archive::Extract->new( archive => $package, type => 'tgz' );
|
||||
$archive->extract( to => $dir ) || die $archive->error;
|
||||
return $archive->files;
|
||||
}
|
||||
|
||||
#===================================
|
||||
sub extract_rpm {
|
||||
#===================================
|
||||
my ( $package, $dir ) = @_;
|
||||
my $cwd = Cwd::cwd();
|
||||
my @files;
|
||||
eval {
|
||||
chdir $dir;
|
||||
say "Trying with rpm2cpio";
|
||||
my $out = eval {`rpm2cpio '$package' | cpio -idmv --quiet`};
|
||||
unless ($out) {
|
||||
say "Trying with rpm2cpio.pl";
|
||||
$out = eval {`rpm2cpio.pl '$package' | cpio -idmv --quiet`};
|
||||
}
|
||||
@files = split "\n", $out if $out;
|
||||
};
|
||||
chdir $cwd;
|
||||
die $@ if $@;
|
||||
die "Couldn't extract $package\n" unless @files;
|
||||
return \@files;
|
||||
}
|
||||
|
||||
#===================================
|
||||
sub extract_deb {
|
||||
#===================================
|
||||
my ( $package, $dir ) = @_;
|
||||
my $archive = Archive::Ar->new;
|
||||
$archive->read($package) || die $archive->error;
|
||||
my $cwd = Cwd::cwd();
|
||||
eval {
|
||||
chdir $dir;
|
||||
$archive->extract('data.tar.gz') || die $archive->error;
|
||||
};
|
||||
chdir $cwd;
|
||||
die $@ if $@;
|
||||
$archive = Archive::Extract->new(
|
||||
archive => $dir . '/data.tar.gz',
|
||||
type => 'tgz'
|
||||
my $source = shift;
|
||||
my @jars;
|
||||
File::Find::find(
|
||||
{ wanted => sub {
|
||||
push @jars, File::Spec->rel2abs( $_, $source )
|
||||
if /\.jar$/ && !/elasticsearch[^\/]*$/;
|
||||
},
|
||||
no_chdir => 1
|
||||
},
|
||||
$source
|
||||
);
|
||||
$archive->extract( to => $dir ) || die $archive->error;
|
||||
return $archive->files;
|
||||
die "No JARS found in: $source\n"
|
||||
unless @jars;
|
||||
return calculate_shas(@jars);
|
||||
}
|
||||
|
||||
#===================================
|
||||
|
@ -291,11 +244,13 @@ sub usage {
|
|||
|
||||
USAGE:
|
||||
|
||||
# check the sha1 and LICENSE files for each jar in the zip|gz|deb|rpm
|
||||
# check the sha1 and LICENSE files for each jar in the zip or directory
|
||||
$0 --check path/to/licenses/ path/to/package.zip
|
||||
$0 --check path/to/licenses/ path/to/dir/
|
||||
|
||||
# updates the sha1s for each jar in the zip|gz|deb|rpm
|
||||
# updates the sha1s for each jar in the zip or directory
|
||||
$0 --update path/to/licenses/ path/to/package.zip
|
||||
$0 --update path/to/licenses/ path/to/dir/
|
||||
|
||||
USAGE
|
||||
|
||||
|
|
|
@ -1,806 +0,0 @@
|
|||
###########################################################
|
||||
# Archive::Ar - Pure perl module to handle ar achives
|
||||
#
|
||||
# Copyright 2003 - Jay Bonci <jaybonci@cpan.org>
|
||||
# Copyright 2014 - John Bazik <jbazik@cpan.org>
|
||||
# Licensed under the same terms as perl itself
|
||||
#
|
||||
###########################################################
|
||||
package Archive::Ar;
|
||||
|
||||
use base qw(Exporter);
|
||||
our @EXPORT_OK = qw(COMMON BSD GNU);
|
||||
|
||||
use strict;
|
||||
use File::Spec;
|
||||
use Time::Local;
|
||||
use Carp qw(carp longmess);
|
||||
|
||||
use vars qw($VERSION);
|
||||
$VERSION = '2.02';
|
||||
|
||||
use constant CAN_CHOWN => ($> == 0 and $^O ne 'MacOS' and $^O ne 'MSWin32');
|
||||
|
||||
use constant ARMAG => "!<arch>\n";
|
||||
use constant SARMAG => length(ARMAG);
|
||||
use constant ARFMAG => "`\n";
|
||||
use constant AR_EFMT1 => "#1/";
|
||||
|
||||
use constant COMMON => 1;
|
||||
use constant BSD => 2;
|
||||
use constant GNU => 3;
|
||||
|
||||
my $has_io_string;
|
||||
BEGIN {
|
||||
$has_io_string = eval {
|
||||
require IO::String;
|
||||
IO::String->import();
|
||||
1;
|
||||
} || 0;
|
||||
}
|
||||
|
||||
sub new {
|
||||
my $class = shift;
|
||||
my $file = shift;
|
||||
my $opts = shift || 0;
|
||||
my $self = bless {}, $class;
|
||||
my $defopts = {
|
||||
chmod => 1,
|
||||
chown => 1,
|
||||
same_perms => ($> == 0) ? 1:0,
|
||||
symbols => undef,
|
||||
};
|
||||
$opts = {warn => $opts} unless ref $opts;
|
||||
|
||||
$self->clear();
|
||||
$self->{opts} = {(%$defopts, %{$opts})};
|
||||
if ($file) {
|
||||
return unless $self->read($file);
|
||||
}
|
||||
return $self;
|
||||
}
|
||||
|
||||
sub set_opt {
|
||||
my $self = shift;
|
||||
my $name = shift;
|
||||
my $val = shift;
|
||||
|
||||
$self->{opts}->{$name} = $val;
|
||||
}
|
||||
|
||||
sub get_opt {
|
||||
my $self = shift;
|
||||
my $name = shift;
|
||||
|
||||
return $self->{opts}->{$name};
|
||||
}
|
||||
|
||||
sub type {
|
||||
return shift->{type};
|
||||
}
|
||||
|
||||
sub clear {
|
||||
my $self = shift;
|
||||
|
||||
$self->{names} = [];
|
||||
$self->{files} = {};
|
||||
$self->{type} = undef;
|
||||
}
|
||||
|
||||
sub read {
|
||||
my $self = shift;
|
||||
my $file = shift;
|
||||
|
||||
my $fh = $self->_get_handle($file);
|
||||
local $/ = undef;
|
||||
my $data = <$fh>;
|
||||
close $fh;
|
||||
|
||||
return $self->read_memory($data);
|
||||
}
|
||||
|
||||
sub read_memory {
|
||||
my $self = shift;
|
||||
my $data = shift;
|
||||
|
||||
$self->clear();
|
||||
return unless $self->_parse($data);
|
||||
return length($data);
|
||||
}
|
||||
|
||||
sub contains_file {
|
||||
my $self = shift;
|
||||
my $filename = shift;
|
||||
|
||||
return unless defined $filename;
|
||||
return exists $self->{files}->{$filename};
|
||||
}
|
||||
|
||||
sub extract {
|
||||
my $self = shift;
|
||||
|
||||
for my $filename (@_ ? @_ : @{$self->{names}}) {
|
||||
$self->extract_file($filename) or return;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub extract_file {
|
||||
my $self = shift;
|
||||
my $filename = shift;
|
||||
my $target = shift || $filename;
|
||||
|
||||
my $meta = $self->{files}->{$filename};
|
||||
return $self->_error("$filename: not in archive") unless $meta;
|
||||
open my $fh, '>', $target or return $self->_error("$target: $!");
|
||||
binmode $fh;
|
||||
syswrite $fh, $meta->{data} or return $self->_error("$filename: $!");
|
||||
close $fh or return $self->_error("$filename: $!");
|
||||
if (CAN_CHOWN && $self->{opts}->{chown}) {
|
||||
chown $meta->{uid}, $meta->{gid}, $filename or
|
||||
return $self->_error("$filename: $!");
|
||||
}
|
||||
if ($self->{opts}->{chmod}) {
|
||||
my $mode = $meta->{mode};
|
||||
unless ($self->{opts}->{same_perms}) {
|
||||
$mode &= ~(oct(7000) | (umask | 0));
|
||||
}
|
||||
chmod $mode, $filename or return $self->_error("$filename: $!");
|
||||
}
|
||||
utime $meta->{date}, $meta->{date}, $filename or
|
||||
return $self->_error("$filename: $!");
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub rename {
|
||||
my $self = shift;
|
||||
my $filename = shift;
|
||||
my $target = shift;
|
||||
|
||||
if ($self->{files}->{$filename}) {
|
||||
$self->{files}->{$target} = $self->{files}->{$filename};
|
||||
delete $self->{files}->{$filename};
|
||||
for (@{$self->{names}}) {
|
||||
if ($_ eq $filename) {
|
||||
$_ = $target;
|
||||
last;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sub chmod {
|
||||
my $self = shift;
|
||||
my $filename = shift;
|
||||
my $mode = shift; # octal string or numeric
|
||||
|
||||
return unless $self->{files}->{$filename};
|
||||
$self->{files}->{$filename}->{mode} =
|
||||
$mode + 0 eq $mode ? $mode : oct($mode);
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub chown {
|
||||
my $self = shift;
|
||||
my $filename = shift;
|
||||
my $uid = shift;
|
||||
my $gid = shift;
|
||||
|
||||
return unless $self->{files}->{$filename};
|
||||
$self->{files}->{$filename}->{uid} = $uid if $uid >= 0;
|
||||
$self->{files}->{$filename}->{gid} = $gid if defined $gid && $gid >= 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub remove {
|
||||
my $self = shift;
|
||||
my $files = ref $_[0] ? shift : \@_;
|
||||
|
||||
my $nfiles_orig = scalar @{$self->{names}};
|
||||
|
||||
for my $file (@$files) {
|
||||
next unless $file;
|
||||
if (exists($self->{files}->{$file})) {
|
||||
delete $self->{files}->{$file};
|
||||
}
|
||||
else {
|
||||
$self->_error("$file: no such member")
|
||||
}
|
||||
}
|
||||
@{$self->{names}} = grep($self->{files}->{$_}, @{$self->{names}});
|
||||
|
||||
return $nfiles_orig - scalar @{$self->{names}};
|
||||
}
|
||||
|
||||
sub list_files {
|
||||
my $self = shift;
|
||||
|
||||
return wantarray ? @{$self->{names}} : $self->{names};
|
||||
}
|
||||
|
||||
sub add_files {
|
||||
my $self = shift;
|
||||
my $files = ref $_[0] ? shift : \@_;
|
||||
|
||||
for my $path (@$files) {
|
||||
if (open my $fd, $path) {
|
||||
my @st = stat $fd or return $self->_error("$path: $!");
|
||||
local $/ = undef;
|
||||
binmode $fd;
|
||||
my $content = <$fd>;
|
||||
close $fd;
|
||||
|
||||
my $filename = (File::Spec->splitpath($path))[2];
|
||||
|
||||
$self->_add_data($filename, $content, @st[9,4,5,2,7]);
|
||||
}
|
||||
else {
|
||||
$self->_error("$path: $!");
|
||||
}
|
||||
}
|
||||
return scalar @{$self->{names}};
|
||||
}
|
||||
|
||||
sub add_data {
|
||||
my $self = shift;
|
||||
my $path = shift;
|
||||
my $content = shift;
|
||||
my $params = shift || {};
|
||||
|
||||
return $self->_error("No filename given") unless $path;
|
||||
|
||||
my $filename = (File::Spec->splitpath($path))[2];
|
||||
|
||||
$self->_add_data($filename, $content,
|
||||
$params->{date} || timelocal(localtime()),
|
||||
$params->{uid} || 0,
|
||||
$params->{gid} || 0,
|
||||
$params->{mode} || 0100644) or return;
|
||||
|
||||
return $self->{files}->{$filename}->{size};
|
||||
}
|
||||
|
||||
sub write {
|
||||
my $self = shift;
|
||||
my $filename = shift;
|
||||
my $opts = {(%{$self->{opts}}, %{shift || {}})};
|
||||
my $type = $opts->{type} || $self->{type} || COMMON;
|
||||
|
||||
my @body = ( ARMAG );
|
||||
|
||||
my %gnuindex;
|
||||
my @filenames = @{$self->{names}};
|
||||
if ($type eq GNU) {
|
||||
#
|
||||
# construct extended filename index, if needed
|
||||
#
|
||||
if (my @longs = grep(length($_) > 15, @filenames)) {
|
||||
my $ptr = 0;
|
||||
for my $long (@longs) {
|
||||
$gnuindex{$long} = $ptr;
|
||||
$ptr += length($long) + 2;
|
||||
}
|
||||
push @body, pack('A16A32A10A2', '//', '', $ptr, ARFMAG),
|
||||
join("/\n", @longs, '');
|
||||
push @body, "\n" if $ptr % 2; # padding
|
||||
}
|
||||
}
|
||||
for my $fn (@filenames) {
|
||||
my $meta = $self->{files}->{$fn};
|
||||
my $mode = sprintf('%o', $meta->{mode});
|
||||
my $size = $meta->{size};
|
||||
my $name;
|
||||
|
||||
if ($type eq GNU) {
|
||||
$fn = '' if defined $opts->{symbols} && $fn eq $opts->{symbols};
|
||||
$name = $fn . '/';
|
||||
}
|
||||
else {
|
||||
$name = $fn;
|
||||
}
|
||||
if (length($name) <= 16 || $type eq COMMON) {
|
||||
push @body, pack('A16A12A6A6A8A10A2', $name,
|
||||
@$meta{qw/date uid gid/}, $mode, $size, ARFMAG);
|
||||
}
|
||||
elsif ($type eq GNU) {
|
||||
push @body, pack('A1A15A12A6A6A8A10A2', '/', $gnuindex{$fn},
|
||||
@$meta{qw/date uid gid/}, $mode, $size, ARFMAG);
|
||||
}
|
||||
elsif ($type eq BSD) {
|
||||
$size += length($name);
|
||||
push @body, pack('A3A13A12A6A6A8A10A2', AR_EFMT1, length($name),
|
||||
@$meta{qw/date uid gid/}, $mode, $size, ARFMAG),
|
||||
$name;
|
||||
}
|
||||
else {
|
||||
return $self->_error("$type: unexpected ar type");
|
||||
}
|
||||
push @body, $meta->{data};
|
||||
push @body, "\n" if $size % 2; # padding
|
||||
}
|
||||
if ($filename) {
|
||||
my $fh = $self->_get_handle($filename, '>');
|
||||
print $fh @body;
|
||||
close $fh;
|
||||
my $len = 0;
|
||||
$len += length($_) for @body;
|
||||
return $len;
|
||||
}
|
||||
else {
|
||||
return join '', @body;
|
||||
}
|
||||
}
|
||||
|
||||
sub get_content {
|
||||
my $self = shift;
|
||||
my ($filename) = @_;
|
||||
|
||||
unless ($filename) {
|
||||
$self->_error("get_content can't continue without a filename");
|
||||
return;
|
||||
}
|
||||
|
||||
unless (exists($self->{files}->{$filename})) {
|
||||
$self->_error(
|
||||
"get_content failed because there is not a file named $filename");
|
||||
return;
|
||||
}
|
||||
|
||||
return $self->{files}->{$filename};
|
||||
}
|
||||
|
||||
sub get_data {
|
||||
my $self = shift;
|
||||
my $filename = shift;
|
||||
|
||||
return $self->_error("$filename: no such member")
|
||||
unless exists $self->{files}->{$filename};
|
||||
return $self->{files}->{$filename}->{data};
|
||||
}
|
||||
|
||||
sub get_handle {
|
||||
my $self = shift;
|
||||
my $filename = shift;
|
||||
my $fh;
|
||||
|
||||
return $self->_error("$filename: no such member")
|
||||
unless exists $self->{files}->{$filename};
|
||||
if ($has_io_string) {
|
||||
$fh = IO::String->new($self->{files}->{$filename}->{data});
|
||||
}
|
||||
else {
|
||||
my $data = $self->{files}->{$filename}->{data};
|
||||
open $fh, '<', \$data or return $self->_error("in-memory file: $!");
|
||||
}
|
||||
return $fh;
|
||||
}
|
||||
|
||||
sub error {
|
||||
my $self = shift;
|
||||
|
||||
return shift() ? $self->{longmess} : $self->{error};
|
||||
}
|
||||
|
||||
#
|
||||
# deprecated
|
||||
#
|
||||
sub DEBUG {
|
||||
my $self = shift;
|
||||
my $debug = shift;
|
||||
|
||||
$self->{opts}->{warn} = 1 unless (defined($debug) and int($debug) == 0);
|
||||
}
|
||||
|
||||
sub _parse {
|
||||
my $self = shift;
|
||||
my $data = shift;
|
||||
|
||||
unless (substr($data, 0, SARMAG, '') eq ARMAG) {
|
||||
return $self->_error("Bad magic number - not an ar archive");
|
||||
}
|
||||
my $type;
|
||||
my $names;
|
||||
while ($data =~ /\S/) {
|
||||
my ($name, $date, $uid, $gid, $mode, $size, $magic) =
|
||||
unpack('A16A12A6A6A8A10a2', substr($data, 0, 60, ''));
|
||||
unless ($magic eq "`\n") {
|
||||
return $self->_error("Bad file header");
|
||||
}
|
||||
if ($name =~ m|^/|) {
|
||||
$type = GNU;
|
||||
if ($name eq '//') {
|
||||
$names = substr($data, 0, $size, '');
|
||||
substr($data, 0, $size % 2, '');
|
||||
next;
|
||||
}
|
||||
elsif ($name eq '/') {
|
||||
$name = $self->{opts}->{symbols};
|
||||
unless (defined $name && $name) {
|
||||
substr($data, 0, $size + $size % 2, '');
|
||||
next;
|
||||
}
|
||||
}
|
||||
else {
|
||||
$name = substr($names, int(substr($name, 1)));
|
||||
$name =~ s/\n.*//;
|
||||
chop $name;
|
||||
}
|
||||
}
|
||||
elsif ($name =~ m|^#1/|) {
|
||||
$type = BSD;
|
||||
$name = substr($data, 0, int(substr($name, 3)), '');
|
||||
$size -= length($name);
|
||||
}
|
||||
else {
|
||||
if ($name =~ m|/$|) {
|
||||
$type ||= GNU; # only gnu has trailing slashes
|
||||
chop $name;
|
||||
}
|
||||
}
|
||||
$uid = int($uid);
|
||||
$gid = int($gid);
|
||||
$mode = oct($mode);
|
||||
my $content = substr($data, 0, $size, '');
|
||||
substr($data, 0, $size % 2, '');
|
||||
|
||||
$self->_add_data($name, $content, $date, $uid, $gid, $mode, $size);
|
||||
}
|
||||
$self->{type} = $type || COMMON;
|
||||
return scalar @{$self->{names}};
|
||||
}
|
||||
|
||||
sub _add_data {
|
||||
my $self = shift;
|
||||
my $filename = shift;
|
||||
my $content = shift || '';
|
||||
my $date = shift;
|
||||
my $uid = shift;
|
||||
my $gid = shift;
|
||||
my $mode = shift;
|
||||
my $size = shift;
|
||||
|
||||
if (exists($self->{files}->{$filename})) {
|
||||
return $self->_error("$filename: entry already exists");
|
||||
}
|
||||
$self->{files}->{$filename} = {
|
||||
name => $filename,
|
||||
date => defined $date ? $date : timelocal(localtime()),
|
||||
uid => defined $uid ? $uid : 0,
|
||||
gid => defined $gid ? $gid : 0,
|
||||
mode => defined $mode ? $mode : 0100644,
|
||||
size => defined $size ? $size : length($content),
|
||||
data => $content,
|
||||
};
|
||||
push @{$self->{names}}, $filename;
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub _get_handle {
|
||||
my $self = shift;
|
||||
my $file = shift;
|
||||
my $mode = shift || '<';
|
||||
|
||||
if (ref $file) {
|
||||
return $file if eval{*$file{IO}} or $file->isa('IO::Handle');
|
||||
return $self->_error("Not a filehandle");
|
||||
}
|
||||
else {
|
||||
open my $fh, $mode, $file or return $self->_error("$file: $!");
|
||||
binmode $fh;
|
||||
return $fh;
|
||||
}
|
||||
}
|
||||
|
||||
sub _error {
|
||||
my $self = shift;
|
||||
my $msg = shift;
|
||||
|
||||
$self->{error} = $msg;
|
||||
$self->{longerror} = longmess($msg);
|
||||
if ($self->{opts}->{warn} > 1) {
|
||||
carp $self->{longerror};
|
||||
}
|
||||
elsif ($self->{opts}->{warn}) {
|
||||
carp $self->{error};
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
1;
|
||||
|
||||
__END__
|
||||
|
||||
=head1 NAME
|
||||
|
||||
Archive::Ar - Interface for manipulating ar archives
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
use Archive::Ar;
|
||||
|
||||
my $ar = Archive::Ar->new;
|
||||
|
||||
$ar->read('./foo.ar');
|
||||
$ar->extract;
|
||||
|
||||
$ar->add_files('./bar.tar.gz', 'bat.pl')
|
||||
$ar->add_data('newfile.txt','Some contents');
|
||||
|
||||
$ar->chmod('file1', 0644);
|
||||
$ar->chown('file1', $uid, $gid);
|
||||
|
||||
$ar->remove('file1', 'file2');
|
||||
|
||||
my $filehash = $ar->get_content('bar.tar.gz');
|
||||
my $data = $ar->get_data('bar.tar.gz');
|
||||
my $handle = $ar->get_handle('bar.tar.gz');
|
||||
|
||||
my @files = $ar->list_files();
|
||||
|
||||
my $archive = $ar->write;
|
||||
my $size = $ar->write('outbound.ar');
|
||||
|
||||
$ar->error();
|
||||
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
Archive::Ar is a pure-perl way to handle standard ar archives.
|
||||
|
||||
This is useful if you have those types of archives on the system, but it
|
||||
is also useful because .deb packages for the Debian GNU/Linux distribution are
|
||||
ar archives. This is one building block in a future chain of modules to build,
|
||||
manipulate, extract, and test debian modules with no platform or architecture
|
||||
dependence.
|
||||
|
||||
You may notice that the API to Archive::Ar is similar to Archive::Tar, and
|
||||
this was done intentionally to keep similarity between the Archive::*
|
||||
modules.
|
||||
|
||||
=head1 METHODS
|
||||
|
||||
=head2 new
|
||||
|
||||
$ar = Archive::Ar->new()
|
||||
$ar = Archive::Ar->new($filename)
|
||||
$ar = Archive::Ar->new($filehandle)
|
||||
|
||||
Returns a new Archive::Ar object. Without an argument, it returns
|
||||
an empty object. If passed a filename or an open filehandle, it will
|
||||
read the referenced archive into memory. If the read fails for any
|
||||
reason, returns undef.
|
||||
|
||||
=head2 set_opt
|
||||
|
||||
$ar->set_opt($name, $val)
|
||||
|
||||
Assign option $name value $val. Possible options are:
|
||||
|
||||
=over 4
|
||||
|
||||
=item * warn
|
||||
|
||||
Warning level. Levels are zero for no warnings, 1 for brief warnings,
|
||||
and 2 for warnings with a stack trace. Default is zero.
|
||||
|
||||
=item * chmod
|
||||
|
||||
Change the file permissions of files created when extracting. Default
|
||||
is true (non-zero).
|
||||
|
||||
=item * same_perms
|
||||
|
||||
When setting file permissions, use the values in the archive unchanged.
|
||||
If false, removes setuid bits and applies the user's umask. Default is
|
||||
true for the root user, false otherwise.
|
||||
|
||||
=item * chown
|
||||
|
||||
Change the owners of extracted files, if possible. Default is true.
|
||||
|
||||
=item * type
|
||||
|
||||
Archive type. May be GNU, BSD or COMMON, or undef if no archive has
|
||||
been read. Defaults to the type of the archive read, or undef.
|
||||
|
||||
=item * symbols
|
||||
|
||||
Provide a filename for the symbol table, if present. If set, the symbol
|
||||
table is treated as a file that can be read from or written to an archive.
|
||||
It is an error if the filename provided matches the name of a file in the
|
||||
archive. If undefined, the symbol table is ignored. Defaults to undef.
|
||||
|
||||
=back
|
||||
|
||||
=head2 get_opt
|
||||
|
||||
$val = $ar->get_opt($name)
|
||||
|
||||
Returns the value of option $name.
|
||||
|
||||
=head2 type
|
||||
|
||||
$type = $ar->type()
|
||||
|
||||
Returns the type of the ar archive. The type is undefined until an
|
||||
archive is loaded. If the archive displays characteristics of a gnu-style
|
||||
archive, GNU is returned. If it looks like a bsd-style archive, BSD
|
||||
is returned. Otherwise, COMMON is returned. Note that unless filenames
|
||||
exceed 16 characters in length, bsd archives look like the common format.
|
||||
|
||||
=head2 clear
|
||||
|
||||
$ar->clear()
|
||||
|
||||
Clears the current in-memory archive.
|
||||
|
||||
=head2 read
|
||||
|
||||
$len = $ar->read($filename)
|
||||
$len = $ar->read($filehandle)
|
||||
|
||||
This reads a new file into the object, removing any ar archive already
|
||||
represented in the object. The argument may be a filename, filehandle
|
||||
or IO::Handle object. Returns the size of the file contents or undef
|
||||
if it fails.
|
||||
|
||||
=head2 read_memory
|
||||
|
||||
$len = $ar->read_memory($data)
|
||||
|
||||
Parses the string argument as an archive, reading it into memory. Replaces
|
||||
any previously loaded archive. Returns the number of bytes read, or undef
|
||||
if it fails.
|
||||
|
||||
=head2 contains_file
|
||||
|
||||
$bool = $ar->contains_file($filename)
|
||||
|
||||
Returns true if the archive contains a file with $filename. Returns
|
||||
undef otherwise.
|
||||
|
||||
=head2 extract
|
||||
|
||||
$ar->extract()
|
||||
$ar->extract_file($filename)
|
||||
|
||||
Extracts files from the archive. The first form extracts all files, the
|
||||
latter extracts just the named file. Extracted files are assigned the
|
||||
permissions and modification time stored in the archive, and, if possible,
|
||||
the user and group ownership. Returns non-zero upon success, or undef if
|
||||
failure.
|
||||
|
||||
=head2 rename

$ar->rename($filename, $newname)

Changes the name of a file in the in-memory archive.

=head2 chmod

$ar->chmod($filename, $mode);

Changes the mode of the member to C<$mode>.

=head2 chown

$ar->chown($filename, $uid, $gid);
$ar->chown($filename, $uid);

Changes the ownership of the member to user id C<$uid> and (optionally)
group id C<$gid>. Negative id values are ignored.

=head2 remove

$ar->remove(@filenames)
$ar->remove($arrayref)

Removes files from the in-memory archive. Returns the number of files
removed.

=head2 list_files

@filenames = $ar->list_files()

Returns a list of the names of all the files in the archive.
If called in scalar context, returns a reference to an array.

=head2 add_files

$ar->add_files(@filenames)
$ar->add_files($arrayref)

Adds files to the archive. The arguments can be paths, but only the
filenames are stored in the archive. Stores the uid, gid, mode, size,
and modification timestamp of each file as returned by C<stat()>.

Returns the number of files successfully added, or undef on failure.

=head2 add_data

$ar->add_data("filename", $data)
$ar->add_data("filename", $data, $options)

Adds a file to the in-memory archive with name $filename and content
$data. File properties can be set with the optional hash reference
$options:

$options = {
    'data' => $data,
    'uid'  => $uid,   # defaults to zero
    'gid'  => $gid,   # defaults to zero
    'date' => $date,  # date in epoch seconds; defaults to now
    'mode' => $mode,  # defaults to 0100644
}

Note that you cannot add_data over an existing file. Returns the file
length in bytes on success, undef otherwise.

=head2 write

$data = $ar->write()
$len = $ar->write($filename)

Returns the archive as a string, or writes it to disk as $filename.
Returns the archive size on success when writing to disk, or undef on
failure.

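As a small illustration of add_data and write together, the following sketch
builds an archive entirely in memory and writes it to disk (the file name,
contents, and properties are made up):

use Archive::Ar;

my $ar = Archive::Ar->new();
# add a member from an in-memory string, with explicit properties
$ar->add_data('hello.txt', "hello world\n", { 'mode' => 0100644, 'date' => time() })
    or die $ar->error();
my $len = $ar->write('hello.a');   # returns the archive size on success
defined $len or die $ar->error();
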
=head2 get_content

$content = $ar->get_content($filename)

Returns a hash reference describing the named member, including the data
the file would contain. If the file does not exist or no filename is
given, returns undef. On success, the hash looks like this:

$content = {
    'name' => $filename,
    'date' => $mtime,
    'uid'  => $uid,
    'gid'  => $gid,
    'mode' => $mode,
    'size' => $size,
    'data' => $file_contents,
}

=head2 get_data

$data = $ar->get_data("filename")

Returns a scalar containing the file data of the given archive
member. Returns undef on error.

=head2 get_handle

$handle = $ar->get_handle("filename")

Returns a file handle to the in-memory file data of the given archive member.
Returns undef on error. This can be useful for unpacking nested archives.
Uses IO::String if it is loaded.

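For example, a sketch of unpacking a nested archive through get_handle (the
member names are hypothetical; IO::String is loaded explicitly so the handle
form is available):

use Archive::Ar;
use IO::String;

my $outer = Archive::Ar->new();
$outer->read('bundle.a') or die $outer->error();
my $fh = $outer->get_handle('inner.a');   # handle over the member's in-memory data
my $inner = Archive::Ar->new();
$inner->read($fh) or die $inner->error(); # read() accepts a filehandle
print "$_\n" for $inner->list_files();
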
=head2 error

$errstr = $ar->error($trace)

Returns the current error string, which is usually the last error reported.
If a true value is provided, returns the error message and stack trace.

=head1 BUGS

See https://github.com/jbazik/Archive-Ar/issues/ to report and view bugs.

=head1 SOURCE

The source code repository for Archive::Ar can be found at http://github.com/jbazik/Archive-Ar/.

=head1 COPYRIGHT

Copyright 2009-2014 John Bazik E<lt>jbazik@cpan.orgE<gt>.

Copyright 2003 Jay Bonci E<lt>jaybonci@cpan.orgE<gt>.

This program is free software; you can redistribute it and/or modify it under
the same terms as Perl itself.

See http://www.perl.com/perl/misc/Artistic.html

=cut

@@ -9,8 +9,14 @@
<version>2.0.0-beta1-SNAPSHOT</version>
</parent>

<artifactId>elasticsearch-deb</artifactId>
<groupId>org.elasticsearch.distribution.deb</groupId>
<artifactId>elasticsearch</artifactId>
<name>Elasticsearch DEB Distribution</name>
<!--
We should use deb packaging here because we don't want to publish any jar.
But if you do this, then maven lifecycle does not execute any test (nor compile any test)
-->
<!--packaging>deb</packaging-->

<properties>
<deb.sign>false</deb.sign>

@@ -19,8 +25,8 @@

<dependencies>
<dependency>
<groupId>org.elasticsearch.distribution</groupId>
<artifactId>elasticsearch-fully-loaded</artifactId>
<groupId>org.elasticsearch.distribution.fully-loaded</groupId>
<artifactId>elasticsearch</artifactId>
<version>${elasticsearch.version}</version>
<type>pom</type>
</dependency>

@@ -90,7 +96,6 @@
<!-- some infos https://github.com/tcurdt/jdeb/blob/master/docs/maven.md -->
<artifactId>jdeb</artifactId>
<groupId>org.vafer</groupId>
<version>1.4</version>
<configuration>
<!-- By default it should generates target/${artifactId}_${version}.deb but we get elasticsearch_2.0.0~SNAPSHOT_all.deb -->
<deb>${project.build.directory}/releases/elasticsearch-${project.version}.deb</deb>

@@ -253,33 +258,11 @@
</execution>
</executions>
</plugin>

<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<version>1.8</version>
<executions>
<execution>
<id>check-license</id>
<phase>verify</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<condition property="licenses.exists">
<available file="${basedir}/../licenses" type="dir"/>
</condition>
<echo taskName="license check">Running license check</echo>
<!-- on windows checksums are calculated wrong -->
<exec failonerror="${licenses.exists}" executable="perl" dir="${elasticsearch.tools.directory}/license-check" osfamily="unix" >
<arg value="check_license_and_sha.pl"/>
<arg value="--check"/>
<arg value="${basedir}/../licenses"/>
<arg value="${basedir}/target/releases/${project.build.finalName}.deb"/>
</exec>
</target>
</configuration>
</execution>
<!-- start up external cluster -->
<execution>
<id>integ-setup</id>

@@ -9,7 +9,8 @@
<version>2.0.0-beta1-SNAPSHOT</version>
</parent>

<artifactId>elasticsearch-fully-loaded</artifactId>
<groupId>org.elasticsearch.distribution.fully-loaded</groupId>
<artifactId>elasticsearch</artifactId>
<name>Elasticsearch with all optional dependencies</name>
<packaging>pom</packaging>

@@ -71,7 +71,6 @@
</dependencies>

<build>
<finalName>elasticsearch-${project.version}</finalName>
<plugins>
<!-- We copy libs for deb and rpm -->
<plugin>

@@ -97,8 +96,32 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<!-- <version>1.8</version> -->
<!-- <configuration><skip>true</skip></configuration> -->
<!-- checks integration test scratch area (where we extract the distribution) -->
<executions>
<execution>
<id>check-license</id>
<phase>verify</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<skip>${skip.integ.tests}</skip>
<target>
<condition property="licenses.exists">
<available file="${basedir}/../licenses" type="dir"/>
</condition>
<echo taskName="license check">Running license check</echo>
<exec failonerror="${licenses.exists}" executable="perl"
dir="${elasticsearch.tools.directory}/license-check">
<arg value="check_license_and_sha.pl"/>
<arg value="--check"/>
<arg value="${basedir}/../licenses"/>
<arg value="${integ.scratch}"/>
</exec>
</target>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>

@@ -132,6 +155,14 @@
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
<configuration>
<!-- distribution modules don't provide any jar. No need to upload empty jars to maven central -->
<skipIfEmpty>true</skipIfEmpty>
</configuration>
</plugin>
<plugin>
<groupId>com.carrotsearch.randomizedtesting</groupId>
<artifactId>junit4-maven-plugin</artifactId>

@@ -143,7 +174,7 @@
<parallelism>1</parallelism>
<systemProperties>
<!-- use external cluster -->
<tests.cluster>127.0.0.1:9300</tests.cluster>
<tests.cluster>127.0.0.1:${integ.transport.port}</tests.cluster>
</systemProperties>
</configuration>
</execution>

@@ -9,19 +9,25 @@
<version>2.0.0-beta1-SNAPSHOT</version>
</parent>

<artifactId>elasticsearch-rpm</artifactId>
<groupId>org.elasticsearch.distribution.rpm</groupId>
<artifactId>elasticsearch</artifactId>
<name>Elasticsearch RPM Distribution</name>
<packaging>rpm</packaging>

<dependencies>
<dependency>
<groupId>org.elasticsearch.distribution</groupId>
<artifactId>elasticsearch-fully-loaded</artifactId>
<groupId>org.elasticsearch.distribution.fully-loaded</groupId>
<artifactId>elasticsearch</artifactId>
<version>${elasticsearch.version}</version>
<type>pom</type>
</dependency>
</dependencies>

<properties>
<skip.integ.tests>true</skip.integ.tests>
<rpm.outputDirectory>${project.build.directory}/releases/</rpm.outputDirectory>
</properties>

<build>

<filters>

@@ -300,8 +306,7 @@
<version>${project.version}</version>
<type>${project.packaging}</type>
<overWrite>true</overWrite>
<outputDirectory>${project.build.directory}/releases/</outputDirectory>
<destFileName>elasticsearch-${project.version}.rpm</destFileName>
<outputDirectory>${rpm.outputDirectory}</outputDirectory>
</artifactItem>
</artifactItems>
</configuration>

@@ -311,32 +316,7 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<version>1.8</version>
<executions>
<execution>
<id>check-license</id>
<phase>verify</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<!-- https://github.com/elastic/elasticsearch/issues/12528 -->
<skip>true</skip>
<target>
<condition property="licenses.exists">
<available file="${basedir}/../licenses" type="dir"/>
</condition>
<echo taskName="license check">Running license check</echo>
<!-- on windows checksums are calculated wrong -->
<exec failonerror="${licenses.exists}" executable="perl" dir="${elasticsearch.tools.directory}/license-check" osfamily="unix" >
<arg value="check_license_and_sha.pl"/>
<arg value="--check"/>
<arg value="${basedir}/../licenses"/>
<arg value="${basedir}/target/releases/${project.build.finalName}.rpm"/>
</exec>
</target>
</configuration>
</execution>
<!-- start up external cluster -->
<execution>
<id>integ-setup</id>

@@ -9,7 +9,8 @@
<version>2.0.0-beta1-SNAPSHOT</version>
</parent>

<artifactId>elasticsearch-shaded</artifactId>
<groupId>org.elasticsearch.distribution.shaded</groupId>
<artifactId>elasticsearch</artifactId>
<name>Elasticsearch Shaded Distribution</name>

<dependencies>

@@ -21,13 +22,13 @@
</dependencies>

<build>
<finalName>${project.artifactId}-${project.version}</finalName>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
<!-- see docs, incremental builds dont play well with shading otherwise -->
<configuration>
<skipIfEmpty>false</skipIfEmpty>
<forceCreation>true</forceCreation>
</configuration>
</plugin>

@@ -4,10 +4,11 @@
<outputDirectory>/lib</outputDirectory>
<useTransitiveDependencies>true</useTransitiveDependencies>
<useTransitiveFiltering>false</useTransitiveFiltering>
<useProjectArtifact>false</useProjectArtifact>
<excludes>
<!-- don't include our POMs etc that we use internally -->
<!-- otherwise, don't filter here. mark deps properly instead -->
<exclude>org.elasticsearch.distribution:*</exclude>
<exclude>*:pom</exclude>
</excludes>
</dependencySet>
</dependencySets>

@@ -69,15 +69,15 @@ fi
while [ $# -gt 0 ]; do
case $1 in
-D*=*)
properties="$properties $1"
properties="$properties \"$1\""
;;
-D*)
var=$1
shift
properties="$properties $var=$1"
properties="$properties \"$var\"=\"$1\""
;;
*)
args="$args $1"
args="$args \"$1\""
esac
shift
done

@@ -88,7 +88,7 @@ if [ -e "$CONF_DIR" ]; then
*-Des.default.path.conf=*|*-Des.path.conf=*)
;;
*)
properties="$properties -Des.default.path.conf=$CONF_DIR"
properties="$properties -Des.default.path.conf=\"$CONF_DIR\""
;;
esac
fi

@@ -98,11 +98,11 @@ if [ -e "$CONF_FILE" ]; then
*-Des.default.config=*|*-Des.config=*)
;;
*)
properties="$properties -Des.default.config=$CONF_FILE"
properties="$properties -Des.default.config=\"$CONF_FILE\""
;;
esac
fi

export HOSTNAME=`hostname -s`

exec "$JAVA" $JAVA_OPTS $ES_JAVA_OPTS -Xmx64m -Xms16m -Delasticsearch -Des.path.home="$ES_HOME" $properties -cp "$ES_HOME/lib/*" org.elasticsearch.plugins.PluginManagerCliParser $args
eval "$JAVA" $JAVA_OPTS $ES_JAVA_OPTS -Xmx64m -Xms16m -Delasticsearch -Des.path.home="\"$ES_HOME\"" $properties -cp "\"$ES_HOME/lib/*\"" org.elasticsearch.plugins.PluginManagerCliParser $args

@@ -257,3 +257,101 @@ setup() {
run rm -rf "$TEMP_CONFIG_DIR"
[ "$status" -eq 0 ]
}

@test "[TAR] install shield plugin to elasticsearch directory with a space" {
export ES_DIR="/tmp/elastic search"

# Install the archive
install_archive

# Checks that the archive is correctly installed
verify_archive_installation

# Move the Elasticsearch installation to a directory with a space in it
rm -rf "$ES_DIR"
mv /tmp/elasticsearch "$ES_DIR"

# Checks that plugin archive is available
[ -e "$SHIELD_ZIP" ]

# Install Shield
run "$ES_DIR/bin/plugin" install elasticsearch/shield/latest -u "file://$SHIELD_ZIP"
[ "$status" -eq 0 ]

# Checks that Shield is correctly installed
assert_file_exist "$ES_DIR/bin/shield"
assert_file_exist "$ES_DIR/bin/shield/esusers"
assert_file_exist "$ES_DIR/bin/shield/syskeygen"
assert_file_exist "$ES_DIR/config/shield"
assert_file_exist "$ES_DIR/config/shield/role_mapping.yml"
assert_file_exist "$ES_DIR/config/shield/roles.yml"
assert_file_exist "$ES_DIR/config/shield/users"
assert_file_exist "$ES_DIR/config/shield/users_roles"
assert_file_exist "$ES_DIR/plugins/shield"

# Remove the plugin
run "$ES_DIR/bin/plugin" remove elasticsearch/shield/latest
[ "$status" -eq 0 ]

# Checks that the plugin is correctly removed
assert_file_not_exist "$ES_DIR/bin/shield"
assert_file_exist "$ES_DIR/config/shield"
assert_file_exist "$ES_DIR/config/shield/role_mapping.yml"
assert_file_exist "$ES_DIR/config/shield/roles.yml"
assert_file_exist "$ES_DIR/config/shield/users"
assert_file_exist "$ES_DIR/config/shield/users_roles"
assert_file_not_exist "$ES_DIR/plugins/shield"

#Cleanup our temporary Elasticsearch installation
rm -rf "$ES_DIR"
}

@test "[TAR] install shield plugin from a directory with a space" {

export SHIELD_ZIP_WITH_SPACE="/tmp/plugins with space/shield.zip"

# Install the archive
install_archive

# Checks that the archive is correctly installed
verify_archive_installation

# Checks that plugin archive is available
[ -e "$SHIELD_ZIP" ]

# Copy the shield plugin to a directory with a space in it
rm -f "$SHIELD_ZIP_WITH_SPACE"
mkdir -p "$(dirname "$SHIELD_ZIP_WITH_SPACE")"
cp $SHIELD_ZIP "$SHIELD_ZIP_WITH_SPACE"

# Install Shield
run /tmp/elasticsearch/bin/plugin install elasticsearch/shield/latest -u "file://$SHIELD_ZIP_WITH_SPACE"
[ "$status" -eq 0 ]

# Checks that Shield is correctly installed
assert_file_exist "/tmp/elasticsearch/bin/shield"
assert_file_exist "/tmp/elasticsearch/bin/shield/esusers"
assert_file_exist "/tmp/elasticsearch/bin/shield/syskeygen"
assert_file_exist "/tmp/elasticsearch/config/shield"
assert_file_exist "/tmp/elasticsearch/config/shield/role_mapping.yml"
assert_file_exist "/tmp/elasticsearch/config/shield/roles.yml"
assert_file_exist "/tmp/elasticsearch/config/shield/users"
assert_file_exist "/tmp/elasticsearch/config/shield/users_roles"
assert_file_exist "/tmp/elasticsearch/plugins/shield"

# Remove the plugin
run /tmp/elasticsearch/bin/plugin remove elasticsearch/shield/latest
[ "$status" -eq 0 ]

# Checks that the plugin is correctly removed
assert_file_not_exist "/tmp/elasticsearch/bin/shield"
assert_file_exist "/tmp/elasticsearch/config/shield"
assert_file_exist "/tmp/elasticsearch/config/shield/role_mapping.yml"
assert_file_exist "/tmp/elasticsearch/config/shield/roles.yml"
assert_file_exist "/tmp/elasticsearch/config/shield/users"
assert_file_exist "/tmp/elasticsearch/config/shield/users_roles"
assert_file_not_exist "/tmp/elasticsearch/plugins/shield"

#Cleanup our plugin directory with a space
rm -rf "$SHIELD_ZIP_WITH_SPACE"
}

@@ -9,13 +9,19 @@
<version>2.0.0-beta1-SNAPSHOT</version>
</parent>

<artifactId>elasticsearch-tar</artifactId>
<groupId>org.elasticsearch.distribution.tar</groupId>
<artifactId>elasticsearch</artifactId>
<name>Elasticsearch TAR Distribution</name>
<!--
We should use pom packaging here because we don't want to publish any jar.
But if you do this, then maven lifecycle does not execute any test (nor compile any test)
-->
<!--packaging>pom</packaging-->

<dependencies>
<dependency>
<groupId>org.elasticsearch.distribution</groupId>
<artifactId>elasticsearch-fully-loaded</artifactId>
<groupId>org.elasticsearch.distribution.fully-loaded</groupId>
<artifactId>elasticsearch</artifactId>
<version>${elasticsearch.version}</version>
<type>pom</type>
</dependency>

@@ -49,30 +55,7 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<version>1.8</version>
<executions>
<execution>
<id>check-license</id>
<phase>verify</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<condition property="licenses.exists">
<available file="${basedir}/../licenses" type="dir"/>
</condition>
<echo taskName="license check">Running license check</echo>
<!-- on windows checksums are calculated wrong -->
<exec failonerror="${licenses.exists}" executable="perl" dir="${elasticsearch.tools.directory}/license-check" osfamily="unix" >
<arg value="check_license_and_sha.pl"/>
<arg value="--check"/>
<arg value="${basedir}/../licenses"/>
<arg value="${basedir}/target/releases/${project.build.finalName}.tar.gz"/>
</exec>
</target>
</configuration>
</execution>
<!-- integration tests -->
<!-- start up external cluster -->
<execution>

@@ -9,13 +9,19 @@
<version>2.0.0-beta1-SNAPSHOT</version>
</parent>

<artifactId>elasticsearch-zip</artifactId>
<groupId>org.elasticsearch.distribution.zip</groupId>
<artifactId>elasticsearch</artifactId>
<name>Elasticsearch ZIP Distribution</name>
<!--
We should use pom packaging here because we don't want to publish any jar.
But if you do this, then maven lifecycle does not execute any test (nor compile any test)
-->
<!--packaging>pom</packaging-->

<dependencies>
<dependency>
<groupId>org.elasticsearch.distribution</groupId>
<artifactId>elasticsearch-fully-loaded</artifactId>
<groupId>org.elasticsearch.distribution.fully-loaded</groupId>
<artifactId>elasticsearch</artifactId>
<version>${elasticsearch.version}</version>
<type>pom</type>
</dependency>

@@ -49,30 +55,7 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<version>1.8</version>
<executions>
<execution>
<id>check-license</id>
<phase>verify</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<condition property="licenses.exists">
<available file="${basedir}/../licenses" type="dir"/>
</condition>
<echo taskName="license check">Running license check</echo>
<!-- on windows checksums are calculated wrong -->
<exec failonerror="${licenses.exists}" executable="perl" dir="${elasticsearch.tools.directory}/license-check" osfamily="unix" >
<arg value="check_license_and_sha.pl"/>
<arg value="--check"/>
<arg value="${basedir}/../licenses"/>
<arg value="${basedir}/target/releases/${project.build.finalName}.zip"/>
</exec>
</target>
</configuration>
</execution>
<!-- execution and integration tests -->
<!-- start up elasticsearch in foreground -->
<execution>

@@ -83,7 +66,11 @@
</goals>
<configuration>
<target if="${run}">
<ant antfile="${elasticsearch.integ.antfile}" target="start-foreground"/>
<!-- use conventional port numbers -->
<ant antfile="${elasticsearch.integ.antfile}" target="start-foreground">
<property name="integ.http.port" value="9200"/>
<property name="integ.transport.port" value="9300"/>
</ant>
</target>
</configuration>
</execution>

@@ -117,19 +117,3 @@ The result will look similar to:
}
}
--------------------------------------------------

if your `plugin` data is subject to change use
`plugins.info_refresh_interval` to change or disable the caching
interval:

[source,js]
--------------------------------------------------
# Change cache to 20 seconds
plugins.info_refresh_interval: 20s

# Infinite cache
plugins.info_refresh_interval: -1

# Disable cache
plugins.info_refresh_interval: 0
--------------------------------------------------

@@ -76,7 +76,7 @@ uses type `nested`:
{
"type1" : {
"properties" : {
"users" : {
"user" : {
"type" : "nested",
"properties": {
"first" : {"type": "string" },

@@ -99,7 +99,7 @@ You may want to index inner objects both as `nested` fields *and* as flattened
{
"type1" : {
"properties" : {
"users" : {
"user" : {
"type" : "nested",
"include_in_parent": true,
"properties": {

@@ -304,7 +304,7 @@ That solves one of the two problems. The problem of differing term frequencies
is solved by _blending_ the term frequencies for all fields in order to even
out the differences. In other words, `first_name:smith` will be treated as
though it has the same weight as `last_name:smith`. (Actually,
`first_name:smith` is given a tiny advantage over `last_name:smith`, just to
`last_name:smith` is given a tiny advantage over `first_name:smith`, just to
make the order of results more stable.)

If you run the above query through the <<search-validate>>, it returns this

@@ -1,5 +1,5 @@
[[setup-service]]
== Running As a Service on Linux
== Running as a Service on Linux

In order to run elasticsearch as a service on your operating system, the provided packages try to make it as easy as possible for you to start and stop elasticsearch during reboot and upgrades.

@@ -77,11 +77,24 @@ public class S3BlobStore extends AbstractComponent implements BlobStore {
// Also, if invalid security credentials are used to execute this method, the
// client is not able to distinguish between bucket permission errors and
// invalid credential errors, and this method could return an incorrect result.
if (!client.doesBucketExist(bucket)) {
if (region != null) {
client.createBucket(bucket, region);
} else {
client.createBucket(bucket);
int retry = 0;
while (retry <= maxRetries) {
try {
if (!client.doesBucketExist(bucket)) {
if (region != null) {
client.createBucket(bucket, region);
} else {
client.createBucket(bucket);
}
}
break;
} catch (AmazonClientException e) {
if (shouldRetry(e) && retry < maxRetries) {
retry++;
} else {
logger.debug("S3 client create bucket failed");
throw e;
}
}
}
}

@@ -385,16 +385,14 @@
<skip>${skip.integ.tests}</skip>
<artifactItems>
<artifactItem>
<groupId>org.elasticsearch.distribution</groupId>
<artifactId>elasticsearch-zip</artifactId>
<groupId>org.elasticsearch.distribution.zip</groupId>
<artifactId>elasticsearch</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
<!-- these packaging builds sneakily rename their artifacts to look nothing
like their artifact id, so we must duplicate that rename here -->
<destFileName>elasticsearch-${elasticsearch.version}.zip</destFileName>
</artifactItem>
</artifactItems>
<useBaseVersion>true</useBaseVersion>
<outputDirectory>${integ.deps}</outputDirectory>
</configuration>
</execution>

@@ -411,7 +409,7 @@
<parallelism>1</parallelism>
<systemProperties>
<!-- use external cluster -->
<tests.cluster>127.0.0.1:9300</tests.cluster>
<tests.cluster>127.0.0.1:${integ.transport.port}</tests.cluster>
</systemProperties>
</configuration>
</execution>

pom.xml

@@ -109,6 +109,8 @@
<integ.scratch>${project.build.directory}/integ-tests</integ.scratch>
<integ.deps>${project.build.directory}/integ-deps</integ.deps>
<integ.temp>${integ.scratch}/temp</integ.temp>
<integ.http.port>9400</integ.http.port>
<integ.transport.port>9500</integ.transport.port>
<no.commit.pattern>\bno(n|)commit\b</no.commit.pattern>
</properties>

@@ -905,7 +907,7 @@
<!-- some infos https://github.com/tcurdt/jdeb/blob/master/docs/maven.md -->
<groupId>org.vafer</groupId>
<artifactId>jdeb</artifactId>
<version>1.3</version>
<version>1.4</version>
<extensions>true</extensions>
</plugin>
<plugin>

@@ -34,4 +34,4 @@
</plugin>
</plugins>
</build>
</project>
</project>

@@ -46,8 +46,8 @@
/^(
index1 \s+
(\d+) \s+
(\d\d\d\d\-\d\d\-\d\dT\d\d:\d\d:\d\d.\d\d\d[+-]\d\d:\d\d) \s+
(\d\d\d\d\-\d\d\-\d\dT\d\d:\d\d:\d\d.\d\d\dZ) \s+
(\d+) \s+
(\d\d\d\d\-\d\d\-\d\dT\d\d:\d\d:\d\d.\d\d\d[+-]\d\d:\d\d) \s*
(\d\d\d\d\-\d\d\-\d\dT\d\d:\d\d:\d\d.\d\d\dZ) \s*
)
$/

@@ -18,4 +18,5 @@
indices.upgrade:
index: test_index

- match: {upgraded_indices.test_index: '/(\d\.)+\d/'}
- match: {upgraded_indices.test_index.oldest_lucene_segment_version: '/(\d\.)+\d/'}
- is_true: upgraded_indices.test_index.upgrade_version