HDFS-6638. Merging change r1608905 from trunk to branch-2.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1608911 13f79535-47bb-0310-9956-ffa450edef68
Chris Nauroth 2014-07-08 19:29:00 +00:00
parent 96bfab8dbc
commit 7c49fb7582
12 changed files with 30 additions and 3 deletions

View File

@@ -12,6 +12,9 @@ Release 2.6.0 - UNRELEASED
HDFS-6511. BlockManager#computeInvalidateWork() could do nothing. (Juan Yu via wang)
HDFS-6638. Shorten test run time with a smaller retry timeout setting.
(Liang Xie via cnauroth)
OPTIMIZATIONS
BUG FIXES
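
Each test hunk below makes the same one-line configuration change: before the MiniDFSCluster is built, the DFSClient retry window base is dropped to 10 so that read retries back off for milliseconds rather than seconds. A minimal standalone sketch of the pattern follows; the class name and main() wrapper are illustrative and not part of this patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Hypothetical example class, not part of the patch.
public class ShortRetryWindowExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Set short retry timeouts so the test runs faster: the window base bounds
    // the randomized sleep the client takes between failed read attempts.
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      // ... exercise read paths that are expected to retry ...
    } finally {
      cluster.shutdown();
    }
  }
}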

View File

@@ -51,6 +51,8 @@ public class TestBlockMissingException {
long blockSize = 1024L;
int numBlocks = 4;
conf = new HdfsConfiguration();
// Set short retry timeouts so this test runs faster
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
try {
dfs = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
dfs.waitActive();

View File

@@ -64,6 +64,8 @@ public class TestBlockReaderLocalLegacy {
conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
UserGroupInformation.getCurrentUser().getShortUserName());
conf.setBoolean(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, false);
// Set short retry timeouts so this test runs faster
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
return conf;
}

View File

@@ -73,7 +73,8 @@ public class TestClientReportBadBlock {
public void startUpCluster() throws IOException {
// disable block scanner
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
// Set short retry timeouts so this test runs faster
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes)
.build();
cluster.waitActive();

View File

@@ -88,6 +88,8 @@ public class TestCrcCorruption {
@Test(timeout=50000)
public void testCorruptionDuringWrt() throws Exception {
Configuration conf = new HdfsConfiguration();
// Set short retry timeouts so this test runs faster
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
MiniDFSCluster cluster = null;
try {
@@ -152,7 +154,8 @@ public class TestCrcCorruption {
int numDataNodes = 2;
short replFactor = 2;
Random random = new Random();
// Set short retry timeouts so this test runs faster
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
cluster.waitActive();
@@ -334,6 +337,8 @@ public class TestCrcCorruption {
short replFactor = (short)numDataNodes;
Configuration conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes);
// Set short retry timeouts so this test runs faster
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
try {

View File

@@ -1463,7 +1463,8 @@ public class TestDFSShell {
Path root = new Path("/test/get");
final Path remotef = new Path(root, fname);
final Configuration conf = new HdfsConfiguration();
// Set short retry timeouts so this test runs faster
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
TestGetRunner runner = new TestGetRunner() {
private int count = 0;
private final FsShell shell = new FsShell(conf);

View File

@@ -202,6 +202,8 @@ public class TestEncryptedTransfer {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
// Set short retry timeouts so this test runs faster
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = getFileSystem(conf);

View File

@@ -54,6 +54,7 @@ public class TestMissingBlocksAlert {
Configuration conf = new HdfsConfiguration();
//minimize test delay
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 0);
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
int fileLen = 10*1024;
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, fileLen/2);

View File

@@ -209,6 +209,8 @@ public class TestBlockTokenWithDFS {
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes);
conf.setInt("ipc.client.connect.max.retries", 0);
// Set short retry timeouts so this test runs faster
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
return conf;
}

View File

@@ -612,6 +612,8 @@ public class TestFsck {
public void testCorruptBlock() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
// Set short retry timeouts so this test runs faster
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
FileSystem fs = null;
DFSClient dfsClient = null;
LocatedBlocks blocks = null;

View File

@@ -64,6 +64,8 @@ public class TestListCorruptFileBlocks {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1); // datanode scans directories
conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000); // datanode sends block reports
// Set short retry timeouts so this test runs faster
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem();
@@ -148,6 +150,8 @@ public class TestListCorruptFileBlocks {
// start populating repl queues immediately
conf.setFloat(DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY,
0f);
// Set short retry timeouts so this test runs faster
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
cluster = new MiniDFSCluster.Builder(conf).waitSafeMode(false).build();
cluster.getNameNodeRpc().setSafeMode(
HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);

View File

@@ -63,6 +63,8 @@ public class TestFailoverWithBlockTokensEnabled {
public void startCluster() throws IOException {
conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
// Set short retry timeouts so this test runs faster
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleHATopology())
.numDataNodes(1)