Merge branch 'trunk' into HDFS-6581

commit 5a5a7da3a3
Author: arp
Date: 2014-09-25 12:55:59 -07:00
6 changed files with 92 additions and 80 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -950,6 +950,11 @@ Release 2.6.0 - UNRELEASED

     HDFS-6534. Fix build on macosx: HDFS parts (Binglin Chang via aw)

+    HDFS-7111. TestSafeMode assumes Unix line endings in safe mode tip.
+    (cnauroth)
+
+    HDFS-7127. TestLeaseRecovery leaks MiniDFSCluster instances. (cnauroth)
+
 Release 2.5.1 - 2014-09-05

   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java

@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestInterDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.After;
 import org.junit.Test;

 public class TestLeaseRecovery {
@@ -48,6 +49,15 @@ public class TestLeaseRecovery {
   static final short REPLICATION_NUM = (short)3;
   private static final long LEASE_PERIOD = 300L;
+  private MiniDFSCluster cluster;
+
+  @After
+  public void shutdown() throws IOException {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
   static void checkMetaInfo(ExtendedBlock b, DataNode dn
       ) throws IOException {
     TestInterDatanodeProtocol.checkMetaInfo(b, dn);
@@ -82,9 +92,6 @@
     final int ORG_FILE_SIZE = 3000;
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
-    MiniDFSCluster cluster = null;
-
-    try {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
     cluster.waitActive();
@@ -116,7 +123,6 @@
       checkMetaInfo(lastblock, datanodes[i]);
     }
-
     DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
     cluster.getNameNodeRpc().append(filestr, dfs.dfs.clientName);
@@ -152,10 +158,6 @@
     cluster.getNameNodeRpc().setSafeMode(
         HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
     }
-    finally {
-      if (cluster != null) {cluster.shutdown();}
-    }
   }

   /**
    * Block Recovery when the meta file not having crcs for all chunks in block
@@ -166,8 +168,7 @@
     Configuration conf = new Configuration();
     conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
         UserGroupInformation.getCurrentUser().getShortUserName());
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
-        .build();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     Path file = new Path("/testRecoveryFile");
     DistributedFileSystem dfs = cluster.getFileSystem();
     FSDataOutputStream out = dfs.create(file);
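Taken together, these hunks move the cluster handle from a per-test local variable wrapped in try/finally to an instance field that a JUnit @After method tears down, so a failed assertion can no longer leak a running MiniDFSCluster. A minimal standalone sketch of that pattern (the class and test names here are illustrative, not part of the patch):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.junit.After;
    import org.junit.Test;

    public class MiniDFSClusterTeardownSketch {
      private MiniDFSCluster cluster;

      // JUnit runs @After whether the test passed or failed, so the cluster
      // is shut down even when an assertion aborts the test body early.
      @After
      public void shutdown() throws IOException {
        if (cluster != null) {
          cluster.shutdown();
        }
      }

      @Test
      public void testSomething() throws IOException {
        Configuration conf = new HdfsConfiguration();
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        // ... exercise the cluster; no try/finally needed in the test body.
      }
    }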

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java

@@ -66,6 +66,7 @@ public class TestSafeMode {
   public static final Log LOG = LogFactory.getLog(TestSafeMode.class);
   private static final Path TEST_PATH = new Path("/test");
   private static final int BLOCK_SIZE = 1024;
+  private static final String NEWLINE = System.getProperty("line.separator");
   Configuration conf;
   MiniDFSCluster cluster;
   FileSystem fs;
@@ -196,7 +197,7 @@
     String status = nn.getNamesystem().getSafemode();
     assertEquals("Safe mode is ON. The reported blocks 0 needs additional " +
-        "15 blocks to reach the threshold 0.9990 of total blocks 15.\n" +
+        "15 blocks to reach the threshold 0.9990 of total blocks 15." + NEWLINE +
         "The number of live datanodes 0 has reached the minimum number 0. " +
         "Safe mode will be turned off automatically once the thresholds " +
         "have been reached.", status);
@@ -449,8 +450,8 @@
     String tipMsg = cluster.getNamesystem().getSafemode();
     assertTrue("Safemode tip message doesn't look right: " + tipMsg,
         tipMsg.contains("The number of live datanodes 0 needs an additional " +
-            "1 live datanodes to reach the minimum number 1.\n" +
-            "Safe mode will be turned off automatically"));
+            "1 live datanodes to reach the minimum number 1." +
+            NEWLINE + "Safe mode will be turned off automatically"));

     // Start a datanode
     cluster.startDataNodes(conf, 1, true, null, null);
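The fix works because the safe-mode tip is composed with the JVM's platform line separator, while the old expected strings hard-coded "\n". A tiny illustrative sketch of the difference (not from the patch):

    public class LineSeparatorSketch {
      // "\n" on Linux/macOS, "\r\n" on Windows.
      private static final String NEWLINE = System.getProperty("line.separator");

      public static void main(String[] args) {
        String expected = "first line." + NEWLINE + "second line.";
        // Prints false on Windows, which is exactly why assertions that
        // hard-code "\n" against JVM-built multi-line strings fail there.
        System.out.println(expected.equals("first line.\nsecond line."));
      }
    }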

hadoop-mapreduce-project/CHANGES.txt

@@ -372,6 +372,11 @@ Release 2.6.0 - UNRELEASED
     MAPREDUCE-6104. TestJobHistoryParsing.testPartialJob fails in branch-2
     (Mit Desai via jlowe)

+    MAPREDUCE-6109. Fix minor typo in distcp -p usage text (Charles Lamb
+    via aw)
+
+    MAPREDUCE-6093. minor distcp doc edits (Charles Lamb via aw)
+
 Release 2.5.1 - 2014-09-05

   INCOMPATIBLE CHANGES

hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm

@@ -119,13 +119,13 @@ $H3 Basic Usage
 $H3 Update and Overwrite

 `-update` is used to copy files from source that don't exist at the target
-or differ than the target version. `-overwrite` overwrites target-files that
+or differ from the target version. `-overwrite` overwrites target-files that
 exist at the target.

-Update and Overwrite options warrant special attention, since their handling
-of source-paths varies from the defaults in a very subtle manner. Consider a
-copy from `/source/first/` and `/source/second/` to `/target/`, where the
-source paths have the following contents:
+The Update and Overwrite options warrant special attention since their
+handling of source-paths varies from the defaults in a very subtle manner.
+Consider a copy from `/source/first/` and `/source/second/` to `/target/`,
+where the source paths have the following contents:

     hdfs://nn1:8020/source/first/1
     hdfs://nn1:8020/source/first/2
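For readers driving DistCp from code rather than the shell, the same update/overwrite choice maps onto the Hadoop 2.x Java API. A hedged sketch, using the doc's example source paths; the nn2 target URI is illustrative, and the DistCpOptions constructor and setters reflect my reading of the 2.x API, so check them against your version:

    import java.util.Arrays;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.tools.DistCp;
    import org.apache.hadoop.tools.DistCpOptions;

    public class DistCpUpdateSketch {
      public static void main(String[] args) throws Exception {
        DistCpOptions options = new DistCpOptions(
            Arrays.asList(new Path("hdfs://nn1:8020/source/first"),
                          new Path("hdfs://nn1:8020/source/second")),
            new Path("hdfs://nn2:8020/target"));
        options.setSyncFolder(true);   // programmatic equivalent of -update
        // options.setOverwrite(true); // -overwrite; mutually exclusive with -update

        // Builds and submits the underlying MapReduce copy job.
        new DistCp(new Configuration(), options).execute();
      }
    }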