HDFS-5515. Fix TestDFSStartupVersions for HDFS-2832.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1542176 13f79535-47bb-0310-9956-ffa450edef68
parent 132a8ff7c7
commit cd768489f3
@@ -107,3 +107,5 @@ IMPROVEMENTS:
     HDFS-5510. Fix a findbug warning in DataStorage.java on HDFS-2832 branch.
     (Junping Du via Arpit Agarwal)
 
+    HDFS-5515. Fix TestDFSStartupVersions for HDFS-2832. (Arpit Agarwal)
+
@@ -98,7 +98,7 @@ public class DataStorage extends Storage {
     return datanodeUuid;
   }
 
-  synchronized void setDatanodeUuid(String newDatanodeUuid) {
+  public synchronized void setDatanodeUuid(String newDatanodeUuid) {
     this.datanodeUuid = newDatanodeUuid;
   }
 
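
Note: setDatanodeUuid widens from package-private to public so that helpers outside the package (UpgradeUtilities, later in this diff) can seed a known UUID. A minimal standalone sketch of that pattern; StorageStub and StorageStubDemo are illustrative names, not Hadoop classes.

// Illustrative only: mirrors the visibility change, not the real DataStorage class.
class StorageStub {
  private String datanodeUuid;

  public synchronized String getDatanodeUuid() {
    return datanodeUuid;
  }

  // Public so callers in other packages (e.g. test utilities) can seed a fixed value.
  public synchronized void setDatanodeUuid(String newDatanodeUuid) {
    this.datanodeUuid = newDatanodeUuid;
  }
}

public class StorageStubDemo {
  public static void main(String[] args) {
    StorageStub storage = new StorageStub();
    storage.setDatanodeUuid("FixedDatanodeUuid"); // same fixed value the test utility uses
    System.out.println(storage.getDatanodeUuid());
  }
}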
@@ -292,8 +292,7 @@ public class DataStorage extends Storage {
     props.setProperty("storageID", sd.getStorageUuid());
 
     String datanodeUuid = getDatanodeUuid();
-    if (LayoutVersion.supports(Feature.ADD_DATANODE_AND_STORAGE_UUIDS,
-        layoutVersion) && datanodeUuid != null) {
+    if (datanodeUuid != null) {
       props.setProperty("datanodeUuid", datanodeUuid);
     }
 
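
Note: the layout-version feature check is dropped, so datanodeUuid is written whenever it is non-null. A hedged, self-contained sketch of that write path using plain java.util.Properties; only the property keys come from the diff, while the class name and file handling are illustrative, not the real Storage code.

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Properties;

public class VersionPropsSketch {
  // Writes a VERSION-style properties file; datanodeUuid is only
  // emitted when present, with no layout-version feature check.
  static void writeVersion(File out, String storageUuid, String datanodeUuid)
      throws IOException {
    Properties props = new Properties();
    props.setProperty("storageID", storageUuid);
    if (datanodeUuid != null) {
      props.setProperty("datanodeUuid", datanodeUuid);
    }
    try (FileOutputStream fos = new FileOutputStream(out)) {
      props.store(fos, null);
    }
  }

  public static void main(String[] args) throws IOException {
    writeVersion(new File("VERSION"), "storage-1", "FixedDatanodeUuid");
  }
}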
@@ -237,7 +237,7 @@ public class TestDFSStartupVersions {
    * this iterations version 3-tuple
    * </pre>
    */
-  @Test
+  @Test (timeout=300000)
  public void testVersions() throws Exception {
     UpgradeUtilities.initialize();
     Configuration conf = UpgradeUtilities.initializeStorageStateConf(1,
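
Note: JUnit 4's @Test(timeout=...) fails a test that runs longer than the given number of milliseconds, which is what the added (timeout=300000) caps do throughout this commit. A minimal standalone example; the test class and body are placeholders, not the Hadoop tests.

import static org.junit.Assert.assertTrue;
import org.junit.Test;

public class TimeoutSketchTest {
  // Fails automatically if the body runs longer than 300 seconds,
  // mirroring the @Test (timeout=300000) annotations added in this commit.
  @Test(timeout = 300000)
  public void finishesWithinFiveMinutes() throws Exception {
    assertTrue(1 + 1 == 2); // placeholder work
  }
}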
@@ -454,6 +454,7 @@ public class UpgradeUtilities {
   public static void createDataNodeVersionFile(File[] parent,
       StorageInfo version, String bpid, String bpidToWrite) throws IOException {
     DataStorage storage = new DataStorage(version);
+    storage.setDatanodeUuid("FixedDatanodeUuid");
 
     File[] versionFiles = new File[parent.length];
     for (int i = 0; i < parent.length; i++) {
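
Note: seeding DataStorage with a fixed UUID before writing VERSION files makes every generated file carry the same datanodeUuid, so the startup-version comparisons in the test are deterministic. A standalone sketch of that idea, assuming simple Properties-based VERSION files; keys other than datanodeUuid are illustrative, not the real UpgradeUtilities code.

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Properties;

public class FixedUuidVersionFiles {
  // Writes one VERSION file per parent directory, all sharing the same
  // fixed datanode UUID so test comparisons are deterministic.
  static void createVersionFiles(File[] parents, String bpid) throws IOException {
    String datanodeUuid = "FixedDatanodeUuid";
    for (File parent : parents) {
      parent.mkdirs();
      Properties props = new Properties();
      props.setProperty("datanodeUuid", datanodeUuid);
      props.setProperty("blockpoolID", bpid); // illustrative key
      try (FileOutputStream out = new FileOutputStream(new File(parent, "VERSION"))) {
        props.store(out, null);
      }
    }
  }

  public static void main(String[] args) throws IOException {
    createVersionFiles(new File[] { new File("dn1"), new File("dn2") }, "BP-1");
  }
}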
@@ -55,7 +55,7 @@ public class TestListCorruptFileBlocks {
   static Log LOG = NameNode.stateChangeLog;
 
   /** check if nn.getCorruptFiles() returns a file that has corrupted blocks */
-  @Test
+  @Test (timeout=300000)
   public void testListCorruptFilesCorruptedBlock() throws Exception {
     MiniDFSCluster cluster = null;
     Random random = new Random();
@@ -131,7 +131,7 @@ public class TestListCorruptFileBlocks {
   /**
    * Check that listCorruptFileBlocks works while the namenode is still in safemode.
    */
-  @Test
+  @Test (timeout=300000)
   public void testListCorruptFileBlocksInSafeMode() throws Exception {
     MiniDFSCluster cluster = null;
     Random random = new Random();
@@ -262,7 +262,7 @@ public class TestListCorruptFileBlocks {
   }
 
   // deliberately remove blocks from a file and validate the list-corrupt-file-blocks API
-  @Test
+  @Test (timeout=300000)
   public void testlistCorruptFileBlocks() throws Exception {
     Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
@@ -372,7 +372,7 @@ public class TestListCorruptFileBlocks {
   /**
    * test listCorruptFileBlocks in DistributedFileSystem
    */
-  @Test
+  @Test (timeout=300000)
   public void testlistCorruptFileBlocksDFS() throws Exception {
     Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
@@ -445,7 +445,7 @@ public class TestListCorruptFileBlocks {
    * Also, test that DFS.listCorruptFileBlocks can make multiple successive
    * calls.
    */
-  @Test
+  @Test (timeout=300000)
   public void testMaxCorruptFiles() throws Exception {
     MiniDFSCluster cluster = null;
     try {