Merge branch 'trunk' into HDFS-6581

arp 2014-09-25 12:55:59 -07:00
commit 5a5a7da3a3
6 changed files with 92 additions and 80 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -950,6 +950,11 @@ Release 2.6.0 - UNRELEASED
     HDFS-6534. Fix build on macosx: HDFS parts (Binglin Chang via aw)
 
+    HDFS-7111. TestSafeMode assumes Unix line endings in safe mode tip.
+    (cnauroth)
+
+    HDFS-7127. TestLeaseRecovery leaks MiniDFSCluster instances. (cnauroth)
+
 Release 2.5.1 - 2014-09-05
 
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java

@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestInterDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.After;
 import org.junit.Test;
 
 public class TestLeaseRecovery {
@@ -48,6 +49,15 @@ public class TestLeaseRecovery {
   static final short REPLICATION_NUM = (short)3;
   private static final long LEASE_PERIOD = 300L;
 
+  private MiniDFSCluster cluster;
+
+  @After
+  public void shutdown() throws IOException {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
   static void checkMetaInfo(ExtendedBlock b, DataNode dn
       ) throws IOException {
     TestInterDatanodeProtocol.checkMetaInfo(b, dn);
@@ -82,79 +92,71 @@ public class TestLeaseRecovery {
     final int ORG_FILE_SIZE = 3000;
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
-    MiniDFSCluster cluster = null;
-
-    try {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
     cluster.waitActive();
 
     //create a file
     DistributedFileSystem dfs = cluster.getFileSystem();
     String filestr = "/foo";
     Path filepath = new Path(filestr);
     DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
     assertTrue(dfs.exists(filepath));
     DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);
 
     //get block info for the last block
     LocatedBlock locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(
         dfs.dfs.getNamenode(), filestr);
     DatanodeInfo[] datanodeinfos = locatedblock.getLocations();
     assertEquals(REPLICATION_NUM, datanodeinfos.length);
 
     //connect to data nodes
     DataNode[] datanodes = new DataNode[REPLICATION_NUM];
     for(int i = 0; i < REPLICATION_NUM; i++) {
       datanodes[i] = cluster.getDataNode(datanodeinfos[i].getIpcPort());
       assertTrue(datanodes[i] != null);
     }
 
     //verify Block Info
     ExtendedBlock lastblock = locatedblock.getBlock();
     DataNode.LOG.info("newblocks=" + lastblock);
     for(int i = 0; i < REPLICATION_NUM; i++) {
       checkMetaInfo(lastblock, datanodes[i]);
     }
 
     DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
     cluster.getNameNodeRpc().append(filestr, dfs.dfs.clientName);
 
     // expire lease to trigger block recovery.
     waitLeaseRecovery(cluster);
 
     Block[] updatedmetainfo = new Block[REPLICATION_NUM];
     long oldSize = lastblock.getNumBytes();
     lastblock = TestInterDatanodeProtocol.getLastLocatedBlock(
         dfs.dfs.getNamenode(), filestr).getBlock();
     long currentGS = lastblock.getGenerationStamp();
     for(int i = 0; i < REPLICATION_NUM; i++) {
       updatedmetainfo[i] = DataNodeTestUtils.getFSDataset(datanodes[i]).getStoredBlock(
           lastblock.getBlockPoolId(), lastblock.getBlockId());
       assertEquals(lastblock.getBlockId(), updatedmetainfo[i].getBlockId());
       assertEquals(oldSize, updatedmetainfo[i].getNumBytes());
       assertEquals(currentGS, updatedmetainfo[i].getGenerationStamp());
     }
 
     // verify that lease recovery does not occur when namenode is in safemode
     System.out.println("Testing that lease recovery cannot happen during safemode.");
     filestr = "/foo.safemode";
     filepath = new Path(filestr);
     dfs.create(filepath, (short)1);
     cluster.getNameNodeRpc().setSafeMode(
         HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
     assertTrue(dfs.dfs.exists(filestr));
     DFSTestUtil.waitReplication(dfs, filepath, (short)1);
     waitLeaseRecovery(cluster);
     // verify that we still cannot recover the lease
     LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
     assertTrue("Found " + lm.countLease() + " lease, expected 1", lm.countLease() == 1);
     cluster.getNameNodeRpc().setSafeMode(
         HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
-    }
-    finally {
-      if (cluster != null) {cluster.shutdown();}
-    }
   }
 
   /**
@@ -166,8 +168,7 @@ public class TestLeaseRecovery {
     Configuration conf = new Configuration();
     conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
         UserGroupInformation.getCurrentUser().getShortUserName());
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
-        .build();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     Path file = new Path("/testRecoveryFile");
    DistributedFileSystem dfs = cluster.getFileSystem();
     FSDataOutputStream out = dfs.create(file);
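
The change above (HDFS-7127) is the standard JUnit 4 fix for leaked test resources: hold the MiniDFSCluster in a field and release it in an @After method, which runs after every test whether it passes or fails. A minimal self-contained sketch of the same pattern; the class and test method names here are hypothetical:

    import java.io.IOException;

    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.junit.After;
    import org.junit.Test;

    public class TestClusterTeardown {  // hypothetical name
      private MiniDFSCluster cluster;

      // JUnit runs this after each @Test, even when the test throws,
      // so no code path can leak a running cluster.
      @After
      public void shutdown() throws IOException {
        if (cluster != null) {
          cluster.shutdown();
          cluster = null;
        }
      }

      @Test
      public void testAgainstFreshCluster() throws IOException {
        cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
            .numDataNodes(1).build();
        cluster.waitActive();
        // ... assertions may throw here; @After still shuts the cluster down.
      }
    }

Compared with a per-test try/finally, the field-plus-@After form also covers tests added later that would otherwise forget the finally block.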

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java

@@ -66,6 +66,7 @@ public class TestSafeMode {
   public static final Log LOG = LogFactory.getLog(TestSafeMode.class);
   private static final Path TEST_PATH = new Path("/test");
   private static final int BLOCK_SIZE = 1024;
+  private static final String NEWLINE = System.getProperty("line.separator");
   Configuration conf;
   MiniDFSCluster cluster;
   FileSystem fs;
@@ -196,7 +197,7 @@ public class TestSafeMode {
     String status = nn.getNamesystem().getSafemode();
     assertEquals("Safe mode is ON. The reported blocks 0 needs additional " +
-        "15 blocks to reach the threshold 0.9990 of total blocks 15.\n" +
+        "15 blocks to reach the threshold 0.9990 of total blocks 15." + NEWLINE +
         "The number of live datanodes 0 has reached the minimum number 0. " +
         "Safe mode will be turned off automatically once the thresholds " +
         "have been reached.", status);
@@ -448,9 +449,9 @@ public class TestSafeMode {
     String tipMsg = cluster.getNamesystem().getSafemode();
     assertTrue("Safemode tip message doesn't look right: " + tipMsg,
         tipMsg.contains("The number of live datanodes 0 needs an additional " +
-            "1 live datanodes to reach the minimum number 1.\n" +
-            "Safe mode will be turned off automatically"));
+            "1 live datanodes to reach the minimum number 1." +
+            NEWLINE + "Safe mode will be turned off automatically"));
 
     // Start a datanode
     cluster.startDataNodes(conf, 1, true, null, null);
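
The NEWLINE change (HDFS-7111) exists because the safe-mode tip is built with the platform line separator, so an expected string hard-coded with "\n" only matches on Unix. A minimal sketch of the pitfall, assuming the message is produced via String.format("%n") or an equivalent platform-separator API:

    public class LineSeparatorExample {  // hypothetical illustration
      private static final String NEWLINE = System.getProperty("line.separator");

      public static void main(String[] args) {
        // %n expands to "\n" on Unix and "\r\n" on Windows.
        String produced = String.format("blocks 0%nSafe mode will be turned off");

        // Fails on Windows:
        System.out.println(produced.equals("blocks 0\nSafe mode will be turned off"));
        // Passes on every platform:
        System.out.println(produced.equals("blocks 0" + NEWLINE
            + "Safe mode will be turned off"));
      }
    }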

hadoop-mapreduce-project/CHANGES.txt

@@ -372,6 +372,11 @@ Release 2.6.0 - UNRELEASED
     MAPREDUCE-6104. TestJobHistoryParsing.testPartialJob fails in branch-2
     (Mit Desai via jlowe)
 
+    MAPREDUCE-6109. Fix minor typo in distcp -p usage text (Charles Lamb
+    via aw)
+
+    MAPREDUCE-6093. minor distcp doc edits (Charles Lamb via aw)
+
 Release 2.5.1 - 2014-09-05
 
   INCOMPATIBLE CHANGES

hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm

@@ -119,13 +119,13 @@ $H3 Basic Usage
 $H3 Update and Overwrite
 
 `-update` is used to copy files from source that don't exist at the target
-or differ than the target version. `-overwrite` overwrites target-files that
+or differ from the target version. `-overwrite` overwrites target-files that
 exist at the target.
 
-Update and Overwrite options warrant special attention, since their handling
-of source-paths varies from the defaults in a very subtle manner. Consider a
-copy from `/source/first/` and `/source/second/` to `/target/`, where the
-source paths have the following contents:
+The Update and Overwrite options warrant special attention since their
+handling of source-paths varies from the defaults in a very subtle manner.
+Consider a copy from `/source/first/` and `/source/second/` to `/target/`,
+where the source paths have the following contents:
 
     hdfs://nn1:8020/source/first/1
     hdfs://nn1:8020/source/first/2
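
The same update/overwrite semantics are also reachable from Java through distcp's programmatic entry point. A hedged sketch against the 2.6-era hadoop-distcp API (the constructor and setter names below are my reading of that release and worth verifying); setSyncFolder(true) corresponds to the -update flag, setOverwrite(true) to -overwrite:

    import java.util.Arrays;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.tools.DistCp;
    import org.apache.hadoop.tools.DistCpOptions;

    public class DistCpUpdateExample {  // hypothetical driver
      public static void main(String[] args) throws Exception {
        DistCpOptions options = new DistCpOptions(
            Arrays.asList(new Path("hdfs://nn1:8020/source/first"),
                          new Path("hdfs://nn1:8020/source/second")),
            new Path("hdfs://nn2:8020/target"));
        // With -update/-overwrite, the *contents* of first/ and second/
        // land directly under /target rather than under /target/first
        // and /target/second; that is the subtlety described above.
        options.setSyncFolder(true);
        new DistCp(new Configuration(), options).execute();
      }
    }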

hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java

@@ -54,7 +54,7 @@ public enum DistCpOptionSwitch {
           "and timestamps. " +
           "raw.* xattrs are preserved when both the source and destination " +
           "paths are in the /.reserved/raw hierarchy (HDFS only). raw.* xattr" +
-          "preservation is independent of the -p flag." +
+          "preservation is independent of the -p flag. " +
           "Refer to the DistCp documentation for more details.")),
 
   /**
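
The one-character fix here (MAPREDUCE-6109) is the classic pitfall of Java's string concatenation across source lines: the line break contributes no whitespace to the resulting string, so a missing trailing space fuses two sentences. A tiny hypothetical demonstration:

    public class ConcatSpacingExample {
      public static void main(String[] args) {
        // Without the trailing space the output reads "...flag.Refer to..."
        String broken = "preservation is independent of the -p flag." +
            "Refer to the DistCp documentation for more details.";
        // With it, the sentences are properly separated.
        String fixed = "preservation is independent of the -p flag. " +
            "Refer to the DistCp documentation for more details.";
        System.out.println(broken);
        System.out.println(fixed);
      }
    }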