diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 7829015a903..e4295745987 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -244,6 +244,8 @@ Release 2.0.0 - UNRELEASED HDFS-3179. Improve the exception message thrown by DataStreamer when it failed to add a datanode. (szetszwo) + HDFS-2983. Relax the build version check to permit rolling upgrades within a release. (atm) + OPTIMIZATIONS HDFS-2477. Optimize computing the diff between a block report and the diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 1ac6828d865..11bb12252c7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -146,6 +146,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final int DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_DEFAULT = 2; public static final String DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY = "dfs.namenode.num.extra.edits.retained"; public static final int DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_DEFAULT = 1000000; //1M + public static final String DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY = "dfs.namenode.min.supported.datanode.version"; + public static final String DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT = "2.0.0"; public static final String DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY = "dfs.namenode.edits.dir.minimum"; public static final int DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT = 1; @@ -262,6 +264,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_DATANODE_IPC_ADDRESS_KEY = "dfs.datanode.ipc.address"; public static final int DFS_DATANODE_IPC_DEFAULT_PORT = 50020; public static final String DFS_DATANODE_IPC_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_DATANODE_IPC_DEFAULT_PORT; + public static final String DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY = "dfs.datanode.min.supported.namenode.version"; + public static final String DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT = "2.0.0"; public static final String DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY = "dfs.block.access.token.enable"; public static final boolean DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT = false; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index e084862e82e..72f37ee5e70 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -385,7 +385,7 @@ public class PBHelper { StorageInfoProto storage = info.getStorageInfo(); return new NamespaceInfo(storage.getNamespceID(), storage.getClusterID(), info.getBlockPoolID(), storage.getCTime(), info.getDistUpgradeVersion(), - info.getBuildVersion()); + info.getBuildVersion(), info.getSoftwareVersion()); } public static NamenodeCommand convert(NamenodeCommandProto cmd) { @@ -611,13 +611,14 @@ public class PBHelper { .newBuilder(); return builder.setDatanodeID(PBHelper.convert((DatanodeID) registration)) .setStorageInfo(PBHelper.convert(registration.getStorageInfo())) - 
.setKeys(PBHelper.convert(registration.getExportedKeys())).build(); + .setKeys(PBHelper.convert(registration.getExportedKeys())) + .setSoftwareVersion(registration.getSoftwareVersion()).build(); } public static DatanodeRegistration convert(DatanodeRegistrationProto proto) { return new DatanodeRegistration(PBHelper.convert(proto.getDatanodeID()), PBHelper.convert(proto.getStorageInfo()), PBHelper.convert(proto - .getKeys())); + .getKeys()), proto.getSoftwareVersion()); } public static DatanodeCommand convert(DatanodeCommandProto proto) { @@ -893,7 +894,8 @@ public class PBHelper { .setBlockPoolID(info.getBlockPoolID()) .setBuildVersion(info.getBuildVersion()) .setDistUpgradeVersion(info.getDistributedUpgradeVersion()) - .setStorageInfo(PBHelper.convert((StorageInfo)info)).build(); + .setStorageInfo(PBHelper.convert((StorageInfo)info)) + .setSoftwareVersion(info.getSoftwareVersion()).build(); } // Located Block Arrays and Lists diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java index 5f0b2604b0f..57bd214fb57 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java @@ -32,7 +32,19 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants; @InterfaceStability.Evolving public class IncorrectVersionException extends IOException { private static final long serialVersionUID = 1L; + + public IncorrectVersionException(String message) { + super(message); + } + public IncorrectVersionException(String minimumVersion, String reportedVersion, + String remoteDaemon, String thisDaemon) { + this("The reported " + remoteDaemon + " version is too low to communicate" + + " with this " + thisDaemon + ". " + remoteDaemon + " version: '" + + reportedVersion + "' Minimum " + remoteDaemon + " version: '" + + minimumVersion + "'"); + } + public IncorrectVersionException(int versionReported, String ofWhat) { this(versionReported, ofWhat, HdfsConstants.LAYOUT_VERSION); } @@ -40,16 +52,9 @@ public class IncorrectVersionException extends IOException { public IncorrectVersionException(int versionReported, String ofWhat, int versionExpected) { - super("Unexpected version " - + (ofWhat==null ? "" : "of " + ofWhat) + ". Reported: " - + versionReported + ". Expecting = " + versionExpected + "."); + this("Unexpected version " + + (ofWhat==null ? "" : "of " + ofWhat) + ". Reported: " + + versionReported + ". Expecting = " + versionExpected + "."); } - public IncorrectVersionException(String versionReported, - String ofWhat, - String versionExpected) { - super("Unexpected version " - + (ofWhat==null ? "" : "of " + ofWhat) + ". Reported: " - + versionReported + ". 
Expecting = " + versionExpected + "."); - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java index 25e3a6781ef..f5d09b1fef2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; import org.apache.hadoop.hdfs.server.common.IncorrectVersionException; -import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; @@ -49,9 +48,11 @@ import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo; import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks; import org.apache.hadoop.hdfs.server.protocol.StorageReport; +import org.apache.hadoop.hdfs.util.VersionUtil; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.VersionInfo; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Maps; @@ -178,17 +179,23 @@ class BPServiceActor implements Runnable { private void checkNNVersion(NamespaceInfo nsInfo) throws IncorrectVersionException { // build and layout versions should match - String nsBuildVer = nsInfo.getBuildVersion(); - String stBuildVer = Storage.getBuildVersion(); - if (!nsBuildVer.equals(stBuildVer)) { - LOG.warn("Data-node and name-node Build versions must be the same. " + - "Namenode build version: " + nsBuildVer + "Datanode " + - "build version: " + stBuildVer); - throw new IncorrectVersionException(nsBuildVer, "namenode", stBuildVer); + String nnVersion = nsInfo.getSoftwareVersion(); + String minimumNameNodeVersion = dnConf.getMinimumNameNodeVersion(); + if (VersionUtil.compareVersions(nnVersion, minimumNameNodeVersion) < 0) { + IncorrectVersionException ive = new IncorrectVersionException( + minimumNameNodeVersion, nnVersion, "NameNode", "DataNode"); + LOG.warn(ive.getMessage()); + throw ive; + } + String dnVersion = VersionInfo.getVersion(); + if (!nnVersion.equals(dnVersion)) { + LOG.info("Reported NameNode version '" + nnVersion + "' does not match " + + "DataNode version '" + dnVersion + "' but is within acceptable " + + "limits. Note: This is normal during a rolling upgrade."); } if (HdfsConstants.LAYOUT_VERSION != nsInfo.getLayoutVersion()) { - LOG.warn("Data-node and name-node layout versions must be the same." + + LOG.warn("DataNode and NameNode layout versions must be the same." 
+ " Expected: "+ HdfsConstants.LAYOUT_VERSION + " actual "+ nsInfo.getLayoutVersion()); throw new IncorrectVersionException( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java index e4bf9a676dc..92f1edc2fca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java @@ -31,6 +31,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_TRANSFERTO_ALLOW import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_TRANSFERTO_ALLOWED_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -58,6 +60,8 @@ class DNConf { final long deleteReportInterval; final long initialBlockReportDelay; final int writePacketSize; + + final String minimumNameNodeVersion; public DNConf(Configuration conf) { socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, @@ -111,5 +115,12 @@ class DNConf { this.syncOnClose = conf.getBoolean(DFS_DATANODE_SYNCONCLOSE_KEY, DFS_DATANODE_SYNCONCLOSE_DEFAULT); + this.minimumNameNodeVersion = conf.get(DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY, + DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT); + } + + // We get minimumNameNodeVersion via a method so it can be mocked out in tests. 
+ String getMinimumNameNodeVersion() { + return this.minimumNameNodeVersion; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index a0c0e06bad0..eb4578d1de9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -677,6 +677,7 @@ public class DataNode extends Configured bpRegistration.setIpcPort(getIpcPort()); bpRegistration.setHostName(hostName); bpRegistration.setStorageID(getStorageId()); + bpRegistration.setSoftwareVersion(VersionInfo.getVersion()); StorageInfo storageInfo = storage.getBPStorage(nsInfo.getBlockPoolID()); if (storageInfo == null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java index 02a16b7cc64..47b577d007e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java @@ -230,7 +230,7 @@ public class BackupNode extends NameNode { * @throws UnregisteredNodeException if the registration is invalid */ void verifyJournalRequest(NamenodeRegistration reg) throws IOException { - verifyVersion(reg.getLayoutVersion()); + verifyLayoutVersion(reg.getLayoutVersion()); String errorMsg = null; int expectedNamespaceID = namesystem.getNamespaceInfo().getNamespaceID(); if (reg.getNamespaceID() != expectedNamespaceID) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 8c0877b2e8d..c1b182c92b8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -47,6 +47,7 @@ import org.apache.hadoop.ha.ServiceFailedException; import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceProtocolService; import org.apache.hadoop.ha.protocolPB.HAServiceProtocolPB; import org.apache.hadoop.ha.protocolPB.HAServiceProtocolServerSideTranslatorPB; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HDFSPolicyProvider; import org.apache.hadoop.hdfs.HdfsConfiguration; @@ -108,6 +109,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks; import org.apache.hadoop.hdfs.server.protocol.StorageReport; import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand; +import org.apache.hadoop.hdfs.util.VersionUtil; import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.ProtobufRpcEngine; @@ -122,6 +124,7 @@ import org.apache.hadoop.security.authorize.AuthorizationException; import org.apache.hadoop.security.authorize.ProxyUsers; import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.util.VersionInfo; import 
com.google.protobuf.BlockingService; @@ -148,6 +151,8 @@ class NameNodeRpcServer implements NamenodeProtocols { /** The RPC server that listens to requests from clients */ protected final RPC.Server clientRpcServer; protected final InetSocketAddress clientRpcAddress; + + private final String minimumDataNodeVersion; public NameNodeRpcServer(Configuration conf, NameNode nn) throws IOException { @@ -262,6 +267,10 @@ class NameNodeRpcServer implements NamenodeProtocols { // The rpc-server port can be ephemeral... ensure we have the correct info this.clientRpcAddress = this.clientRpcServer.getListenerAddress(); nn.setRpcServerAddress(conf, clientRpcAddress); + + this.minimumDataNodeVersion = conf.get( + DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY, + DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT); } /** @@ -327,7 +336,7 @@ class NameNodeRpcServer implements NamenodeProtocols { @Override // NamenodeProtocol public NamenodeRegistration register(NamenodeRegistration registration) throws IOException { - verifyVersion(registration.getVersion()); + verifyLayoutVersion(registration.getVersion()); NamenodeRegistration myRegistration = nn.setRegistration(); namesystem.registerBackupNode(registration, myRegistration); return myRegistration; @@ -830,9 +839,10 @@ class NameNodeRpcServer implements NamenodeProtocols { @Override // DatanodeProtocol - public DatanodeRegistration registerDatanode(DatanodeRegistration nodeReg - ) throws IOException { - verifyVersion(nodeReg.getVersion()); + public DatanodeRegistration registerDatanode(DatanodeRegistration nodeReg) + throws IOException { + verifyLayoutVersion(nodeReg.getVersion()); + verifySoftwareVersion(nodeReg); namesystem.registerDatanode(nodeReg); return nodeReg; } @@ -917,7 +927,7 @@ class NameNodeRpcServer implements NamenodeProtocols { * @throws UnregisteredNodeException if the registration is invalid */ void verifyRequest(NodeRegistration nodeReg) throws IOException { - verifyVersion(nodeReg.getVersion()); + verifyLayoutVersion(nodeReg.getVersion()); if (!namesystem.getRegistrationID().equals(nodeReg.getRegistrationID())) { LOG.warn("Invalid registrationID - expected: " + namesystem.getRegistrationID() + " received: " @@ -991,10 +1001,39 @@ class NameNodeRpcServer implements NamenodeProtocols { * @param version * @throws IOException */ - void verifyVersion(int version) throws IOException { + void verifyLayoutVersion(int version) throws IOException { if (version != HdfsConstants.LAYOUT_VERSION) throw new IncorrectVersionException(version, "data node"); } + + private void verifySoftwareVersion(DatanodeRegistration dnReg) + throws IncorrectVersionException { + String dnVersion = dnReg.getSoftwareVersion(); + if (VersionUtil.compareVersions(dnVersion, minimumDataNodeVersion) < 0) { + IncorrectVersionException ive = new IncorrectVersionException( + minimumDataNodeVersion, dnVersion, "DataNode", "NameNode"); + LOG.warn(ive.getMessage() + " DN: " + dnReg); + throw ive; + } + String nnVersion = VersionInfo.getVersion(); + if (!dnVersion.equals(nnVersion)) { + String messagePrefix = "Reported DataNode version '" + dnVersion + + "' of DN " + dnReg + " does not match NameNode version '" + + nnVersion + "'"; + long nnCTime = nn.getFSImage().getStorage().getCTime(); + long dnCTime = dnReg.getStorageInfo().getCTime(); + if (nnCTime != dnCTime) { + IncorrectVersionException ive = new IncorrectVersionException( + messagePrefix + " and CTime of DN ('" + dnCTime + + "') does not match CTime of NN ('" + nnCTime + "')"); + 
LOG.warn(ive); + throw ive; + } else { + LOG.info(messagePrefix + + ". Note: This is normal during a rolling upgrade."); + } + } + } private static String getClientMachine() { String clientMachine = NamenodeWebHdfsMethods.getRemoteAddress(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java index a5522ced519..dda0a6fbee0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java @@ -37,12 +37,14 @@ public class DatanodeRegistration extends DatanodeID private StorageInfo storageInfo; private ExportedBlockKeys exportedKeys; + private String softwareVersion; public DatanodeRegistration(DatanodeID dn, StorageInfo info, - ExportedBlockKeys keys) { + ExportedBlockKeys keys, String softwareVersion) { super(dn); this.storageInfo = info; this.exportedKeys = keys; + this.softwareVersion = softwareVersion; } public DatanodeRegistration(String ipAddr, int xferPort) { @@ -71,6 +73,14 @@ public class DatanodeRegistration extends DatanodeID public ExportedBlockKeys getExportedKeys() { return exportedKeys; } + + public void setSoftwareVersion(String softwareVersion) { + this.softwareVersion = softwareVersion; + } + + public String getSoftwareVersion() { + return softwareVersion; + } @Override // NodeRegistration public int getVersion() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java index f0d46b25b70..eb91a178619 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.namenode.NNStorage; +import org.apache.hadoop.util.VersionInfo; /** * NamespaceInfo is returned by the name-node in reply @@ -38,6 +39,7 @@ public class NamespaceInfo extends StorageInfo { String buildVersion; int distributedUpgradeVersion; String blockPoolID = ""; // id of the block pool + String softwareVersion; public NamespaceInfo() { super(); @@ -45,16 +47,18 @@ public class NamespaceInfo extends StorageInfo { } public NamespaceInfo(int nsID, String clusterID, String bpID, - long cT, int duVersion, String buildVersion) { + long cT, int duVersion, String buildVersion, String softwareVersion) { super(HdfsConstants.LAYOUT_VERSION, nsID, clusterID, cT); blockPoolID = bpID; this.buildVersion = buildVersion; this.distributedUpgradeVersion = duVersion; + this.softwareVersion = softwareVersion; } public NamespaceInfo(int nsID, String clusterID, String bpID, long cT, int duVersion) { - this(nsID, clusterID, bpID, cT, duVersion, Storage.getBuildVersion()); + this(nsID, clusterID, bpID, cT, duVersion, Storage.getBuildVersion(), + VersionInfo.getVersion()); } public String getBuildVersion() { @@ -68,6 +72,10 @@ public class NamespaceInfo extends StorageInfo { public String getBlockPoolID() { return blockPoolID; } + + public String 
getSoftwareVersion() { + return softwareVersion; + } public String toString(){ return super.toString() + ";bpid=" + blockPoolID; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/VersionUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/VersionUtil.java new file mode 100644 index 00000000000..59aa5e128ed --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/VersionUtil.java @@ -0,0 +1,101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.util; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.apache.hadoop.classification.InterfaceAudience; + +@InterfaceAudience.Private +public abstract class VersionUtil { + + private static final Pattern COMPONENT_GROUPS = Pattern.compile("(\\d+)|(\\D+)"); + + /** + * This function splits the two versions on "." and performs a + * naturally-ordered comparison of the resulting components. For example, the + * version string "0.3" is considered to precede "0.20", despite the fact that + * lexical comparison would consider "0.20" to precede "0.3". This method of + * comparison is similar to the method used by package versioning systems like + * deb and RPM. + * + * Version components are compared numerically whenever possible; however, a + * version component can contain non-numeric characters. When a non-numeric + * group of characters is found in a version component, this group is compared + * with the similarly-indexed group in the other version component. If the + * other group is numeric, then the numeric group is considered to precede the + * non-numeric group. If both groups are non-numeric, then a lexical + * comparison is performed. + * + * If two versions have a different number of components, then only the lower + * number of components are compared. If those components are identical + * between the two versions, then the version with fewer components is + * considered to precede the version with more components. + * + * This function returns a negative integer if version1 precedes version2, a + * positive integer if version2 precedes version1, and 0 if and only if the + * two versions' components are identical in value and cardinality. + * + * @param version1 + * the first version to compare + * @param version2 + * the second version to compare + * @return a negative integer if version1 precedes version2, a positive + * integer if version2 precedes version1, and 0 if and only if the two + * versions are equal. 
+ */ + public static int compareVersions(String version1, String version2) { + String[] version1Parts = version1.split("\\."); + String[] version2Parts = version2.split("\\."); + + for (int i = 0; i < version1Parts.length && i < version2Parts.length; i++) { + String component1 = version1Parts[i]; + String component2 = version2Parts[i]; + if (!component1.equals(component2)) { + Matcher matcher1 = COMPONENT_GROUPS.matcher(component1); + Matcher matcher2 = COMPONENT_GROUPS.matcher(component2); + + while (matcher1.find() && matcher2.find()) { + String group1 = matcher1.group(); + String group2 = matcher2.group(); + if (!group1.equals(group2)) { + if (isNumeric(group1) && isNumeric(group2)) { + return Integer.parseInt(group1) - Integer.parseInt(group2); + } else if (!isNumeric(group1) && !isNumeric(group2)) { + return group1.compareTo(group2); + } else { + return isNumeric(group1) ? -1 : 1; + } + } + } + return component1.length() - component2.length(); + } + } + return version1Parts.length - version2Parts.length; + } + + private static boolean isNumeric(String s) { + try { + Integer.parseInt(s); + return true; + } catch (NumberFormatException nfe) { + return false; + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto index 046e07f7fa6..f5f36e85bf6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto @@ -33,6 +33,7 @@ message DatanodeRegistrationProto { required DatanodeIDProto datanodeID = 1; // Datanode information required StorageInfoProto storageInfo = 2; // Node information required ExportedBlockKeysProto keys = 3; // Block keys + required string softwareVersion = 4; // Software version of the DN, e.g. "2.0.0" } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto index a0b055642f3..09b72b62bfd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto @@ -303,10 +303,11 @@ message RemoteEditLogManifestProto { * Namespace information that describes namespace on a namenode */ message NamespaceInfoProto { - required string buildVersion = 1; // Software build version + required string buildVersion = 1; // Software revision version (e.g. an svn or git revision) required uint32 distUpgradeVersion = 2; // Distributed upgrade version required string blockPoolID = 3; // block pool used by the namespace - required StorageInfoProto storageInfo = 4;// Noe information + required StorageInfoProto storageInfo = 4;// Node information + required string softwareVersion = 5; // Software version number (e.g. 
2.0.0) } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java index 2cde7ed4760..ca894a8ef04 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java @@ -17,24 +17,40 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.*; + import java.net.InetSocketAddress; -import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.DFSClient; -import junit.framework.TestCase; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.server.common.IncorrectVersionException; +import org.apache.hadoop.hdfs.server.common.StorageInfo; +import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.VersionInfo; +import org.junit.Test; /** * This class tests that a file need not be closed before its * data can be read by another client. */ -public class TestDatanodeRegistration extends TestCase { +public class TestDatanodeRegistration { + + public static final Log LOG = LogFactory.getLog(TestDatanodeRegistration.class); /** * Regression test for HDFS-894 ensures that, when datanodes * are restarted, the new IPC port is registered with the * namenode. */ + @Test public void testChangeIpcPort() throws Exception { HdfsConfiguration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; @@ -74,4 +90,101 @@ public class TestDatanodeRegistration extends TestCase { } } } + + @Test + public void testRegistrationWithDifferentSoftwareVersions() throws Exception { + Configuration conf = new HdfsConfiguration(); + conf.set(DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY, "3.0.0"); + MiniDFSCluster cluster = null; + try { + cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(0) + .build(); + + NamenodeProtocols rpcServer = cluster.getNameNodeRpc(); + + long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime(); + StorageInfo mockStorageInfo = mock(StorageInfo.class); + doReturn(nnCTime).when(mockStorageInfo).getCTime(); + + DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class); + doReturn(HdfsConstants.LAYOUT_VERSION).when(mockDnReg).getVersion(); + doReturn("fake-storage-id").when(mockDnReg).getStorageID(); + doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo(); + + // Should succeed when software versions are the same. + doReturn("3.0.0").when(mockDnReg).getSoftwareVersion(); + rpcServer.registerDatanode(mockDnReg); + + // Should succeed when software version of DN is above minimum required by NN. + doReturn("4.0.0").when(mockDnReg).getSoftwareVersion(); + rpcServer.registerDatanode(mockDnReg); + + // Should fail when software version of DN is below minimum required by NN.
+ doReturn("2.0.0").when(mockDnReg).getSoftwareVersion(); + try { + rpcServer.registerDatanode(mockDnReg); + fail("Should not have been able to register DN with too-low version."); + } catch (IncorrectVersionException ive) { + GenericTestUtils.assertExceptionContains( + "The reported DataNode version is too low", ive); + LOG.info("Got expected exception", ive); + } + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } + + @Test + public void testRegistrationWithDifferentSoftwareVersionsDuringUpgrade() + throws Exception { + Configuration conf = new HdfsConfiguration(); + conf.set(DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY, "1.0.0"); + MiniDFSCluster cluster = null; + try { + cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(0) + .build(); + + NamenodeProtocols rpcServer = cluster.getNameNodeRpc(); + + long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime(); + StorageInfo mockStorageInfo = mock(StorageInfo.class); + doReturn(nnCTime).when(mockStorageInfo).getCTime(); + + DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class); + doReturn(HdfsConstants.LAYOUT_VERSION).when(mockDnReg).getVersion(); + doReturn("fake-storage-id").when(mockDnReg).getStorageID(); + doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo(); + + // Should succeed when software versions are the same and CTimes are the + // same. + doReturn(VersionInfo.getVersion()).when(mockDnReg).getSoftwareVersion(); + rpcServer.registerDatanode(mockDnReg); + + // Should succeed when software versions are the same and CTimes are + // different. + doReturn(nnCTime + 1).when(mockStorageInfo).getCTime(); + rpcServer.registerDatanode(mockDnReg); + + // Should fail when software version of DN is different from NN and CTimes + // are different. 
+ doReturn(VersionInfo.getVersion() + ".1").when(mockDnReg).getSoftwareVersion(); + try { + rpcServer.registerDatanode(mockDnReg); + fail("Should not have been able to register DN with different software" + + " versions and CTimes"); + } catch (IncorrectVersionException ive) { + GenericTestUtils.assertExceptionContains( + "does not match CTime of NN", ive); + LOG.info("Got expected exception", ive); + } + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java index 652aaf8ae00..a6280d319aa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java @@ -429,12 +429,13 @@ public class TestPBHelper { ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10, getBlockKey(1), keys); DatanodeRegistration reg = new DatanodeRegistration(dnId, - new StorageInfo(), expKeys); + new StorageInfo(), expKeys, "3.0.0"); DatanodeRegistrationProto proto = PBHelper.convert(reg); DatanodeRegistration reg2 = PBHelper.convert(proto); compare(reg.getStorageInfo(), reg2.getStorageInfo()); compare(reg.getExportedKeys(), reg2.getExportedKeys()); compare((DatanodeID)reg, (DatanodeID)reg2); + assertEquals(reg.getSoftwareVersion(), reg2.getSoftwareVersion()); } @Test diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java index e44cf04f905..a55ca0b0a4d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java @@ -18,50 +18,105 @@ package org.apache.hadoop.hdfs.server.datanode; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.io.IOException; import java.net.InetSocketAddress; -import static org.junit.Assert.fail; -import static org.mockito.Mockito.*; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; import org.apache.hadoop.hdfs.server.common.IncorrectVersionException; -import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.VersionInfo; +import org.junit.Before; import org.junit.Test; -import org.mockito.Mockito; - public class TestDatanodeRegister { public static final Log LOG = LogFactory.getLog(TestDatanodeRegister.class); // Invalid address - static final InetSocketAddress INVALID_ADDR = + private static final InetSocketAddress INVALID_ADDR = new InetSocketAddress("127.0.0.1", 1); + + private BPServiceActor actor; + NamespaceInfo fakeNsInfo; + DNConf mockDnConf; + + @Before + public void setUp() throws IOException { + mockDnConf = mock(DNConf.class); + 
doReturn(VersionInfo.getVersion()).when(mockDnConf).getMinimumNameNodeVersion(); + + DataNode mockDN = mock(DataNode.class); + doReturn(true).when(mockDN).shouldRun(); + doReturn(mockDnConf).when(mockDN).getDnConf(); + + BPOfferService mockBPOS = mock(BPOfferService.class); + doReturn(mockDN).when(mockBPOS).getDataNode(); + + actor = new BPServiceActor(INVALID_ADDR, mockBPOS); + + fakeNsInfo = mock(NamespaceInfo.class); + // Return a good software version. + doReturn(VersionInfo.getVersion()).when(fakeNsInfo).getSoftwareVersion(); + // Return a good layout version for now. + doReturn(HdfsConstants.LAYOUT_VERSION).when(fakeNsInfo).getLayoutVersion(); + + DatanodeProtocolClientSideTranslatorPB fakeDnProt = + mock(DatanodeProtocolClientSideTranslatorPB.class); + when(fakeDnProt.versionRequest()).thenReturn(fakeNsInfo); + actor.setNameNode(fakeDnProt); + } @Test - public void testDataNodeRegister() throws Exception { - DataNode mockDN = mock(DataNode.class); - Mockito.doReturn(true).when(mockDN).shouldRun(); + public void testSoftwareVersionDifferences() throws Exception { + // We expect no exception to be thrown when the software versions match. + assertEquals(VersionInfo.getVersion(), + actor.retrieveNamespaceInfo().getSoftwareVersion()); - BPOfferService mockBPOS = Mockito.mock(BPOfferService.class); - Mockito.doReturn(mockDN).when(mockBPOS).getDataNode(); + // We expect no exception to be thrown when the min NN version is below the + // reported NN version. + doReturn("4.0.0").when(fakeNsInfo).getSoftwareVersion(); + doReturn("3.0.0").when(mockDnConf).getMinimumNameNodeVersion(); + assertEquals("4.0.0", actor.retrieveNamespaceInfo().getSoftwareVersion()); - BPServiceActor actor = new BPServiceActor(INVALID_ADDR, mockBPOS); - - NamespaceInfo fakeNSInfo = mock(NamespaceInfo.class); - when(fakeNSInfo.getBuildVersion()).thenReturn("NSBuildVersion"); - DatanodeProtocolClientSideTranslatorPB fakeDNProt = - mock(DatanodeProtocolClientSideTranslatorPB.class); - when(fakeDNProt.versionRequest()).thenReturn(fakeNSInfo); - - actor.setNameNode( fakeDNProt ); - try { + // When the NN reports a version that's too low, throw an exception. + doReturn("3.0.0").when(fakeNsInfo).getSoftwareVersion(); + doReturn("4.0.0").when(mockDnConf).getMinimumNameNodeVersion(); + try { actor.retrieveNamespaceInfo(); - fail("register() did not throw exception! " + - "Expected: IncorrectVersionException"); - } catch (IncorrectVersionException ie) { - LOG.info("register() returned correct Exception: IncorrectVersionException"); + fail("Should have thrown an exception for NN with too-low version"); + } catch (IncorrectVersionException ive) { + GenericTestUtils.assertExceptionContains( + "The reported NameNode version is too low", ive); + LOG.info("Got expected exception", ive); + } + } + + @Test + public void testDifferentLayoutVersions() throws Exception { + // We expect no exceptions to be thrown when the layout versions match. + assertEquals(HdfsConstants.LAYOUT_VERSION, + actor.retrieveNamespaceInfo().getLayoutVersion()); + + // We expect an exception to be thrown when the NN reports a layout version + // different from that of the DN.
+ doReturn(HdfsConstants.LAYOUT_VERSION * 1000).when(fakeNsInfo) + .getLayoutVersion(); + try { + actor.retrieveNamespaceInfo(); + fail("Should have failed to retrieve NS info from DN with bad layout version"); + } catch (IncorrectVersionException ive) { + GenericTestUtils.assertExceptionContains( + "Unexpected version of namenode", ive); + LOG.info("Got expected exception", ive); + } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java index 4b8f225f1e2..ec5b8a72e2f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java @@ -58,6 +58,7 @@ import org.apache.hadoop.net.DNS; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.security.Groups; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.VersionInfo; import org.apache.log4j.Level; import org.apache.log4j.LogManager; @@ -783,6 +784,7 @@ public class NNThroughputBenchmark { String hostName = DNS.getDefaultHost("default", "default"); dnRegistration = new DatanodeRegistration(ipAddr, getNodePort(dnIdx)); dnRegistration.setHostName(hostName); + dnRegistration.setSoftwareVersion(VersionInfo.getVersion()); this.blocks = new ArrayList(blockCapacity); this.nrBlocks = 0; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestVersionUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestVersionUtil.java new file mode 100644 index 00000000000..c2537fd515a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestVersionUtil.java @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.util; + +import static org.junit.Assert.*; + +import org.junit.Test; + +public class TestVersionUtil { + + @Test + public void testCompareVersions() { + // Equal versions are equal. + assertEquals(0, VersionUtil.compareVersions("2.0.0", "2.0.0")); + assertEquals(0, VersionUtil.compareVersions("2.0.0a", "2.0.0a")); + assertEquals(0, VersionUtil.compareVersions("1", "1")); + + // Assert that lower versions are lower, and higher versions are higher. 
+ assertExpectedValues("1", "2.0.0"); + assertExpectedValues("1.0.0", "2"); + assertExpectedValues("1.0.0", "2.0.0"); + assertExpectedValues("1.0", "2.0.0"); + assertExpectedValues("1.0.0", "1.0.0a"); + assertExpectedValues("1.0.0.0", "2.0.0"); + assertExpectedValues("1.0.0", "1.0.0-dev"); + assertExpectedValues("1.0.0", "1.0.1"); + assertExpectedValues("1.0.0", "1.0.2"); + assertExpectedValues("1.0.0", "1.1.0"); + assertExpectedValues("2.0.0", "10.0.0"); + assertExpectedValues("1.0.2a", "1.0.10"); + assertExpectedValues("1.0.2a", "1.0.2b"); + assertExpectedValues("1.0.2a", "1.0.2ab"); + assertExpectedValues("1.0.0a1", "1.0.0a2"); + assertExpectedValues("1.0.0a2", "1.0.0a10"); + assertExpectedValues("1.0", "1.a"); + assertExpectedValues("1.0", "1.a0"); + } + + private static void assertExpectedValues(String lower, String higher) { + assertTrue(VersionUtil.compareVersions(lower, higher) < 0); + assertTrue(VersionUtil.compareVersions(higher, lower) > 0); + } + +}