HDFS-3120. svn merge -c 1308614 from trunk

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1308616 13f79535-47bb-0310-9956-ffa450edef68
Eli Collins 2012-04-02 23:18:21 +00:00
parent 97f328dc40
commit 44b0307763
20 changed files with 19 additions and 39 deletions
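
This commit flips the NameNode to support append, hflush, and hsync out of the box, so clients no longer have to set dfs.support.append before flushing writes. As a rough illustration of what the default now allows (a minimal sketch, not part of this commit: the class name and file path are hypothetical, and it assumes an HDFS filesystem in the classpath configuration), a writer can flush data mid-stream through the Syncable methods on FSDataOutputStream:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class HflushSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // Hypothetical demo path; any writable HDFS path works.
        FSDataOutputStream out = fs.create(new Path("/tmp/hflush-demo.txt"));
        out.writeBytes("first record\n");
        out.hflush(); // flush through the pipeline; visible to new readers
        out.hsync();  // additionally ask the datanodes to sync to disk
        out.close();
        fs.close();
      }
    }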

View File: CHANGES.txt

@@ -70,6 +70,8 @@ Release 2.0.0 - UNRELEASED
     HDFS-3167. CLI-based driver for MiniDFSCluster. (Henry Robinson via atm)
 
+    HDFS-3120. Enable hsync and hflush by default. (eli)
+
   IMPROVEMENTS
 
     HDFS-2018. Move all journal stream management code into one place.

View File: FSNamesystem.java

@@ -300,8 +300,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   NameNodeResourceChecker nnResourceChecker;
 
   private FsServerDefaults serverDefaults;
-  // allow appending to hdfs files
-  private boolean supportAppends = true;
+  private boolean supportAppends;
   private ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure =
       ReplaceDatanodeOnFailure.DEFAULT;
@@ -1812,9 +1812,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       throws AccessControlException, SafeModeException,
       FileAlreadyExistsException, FileNotFoundException,
       ParentNotDirectoryException, IOException {
-    if (supportAppends == false) {
-      throw new UnsupportedOperationException("Append to hdfs not supported." +
-          " Please refer to dfs.support.append configuration parameter.");
+    if (!supportAppends) {
+      throw new UnsupportedOperationException(
+          "Append is not enabled on this NameNode. Use the " +
+          DFS_SUPPORT_APPEND_KEY + " configuration option to enable it.");
     }
     LocatedBlock lb = null;
     writeLock();
@@ -2894,9 +2895,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         //remove lease, close file
         finalizeINodeFileUnderConstruction(src, pendingFile);
-      } else if (supportAppends) {
+      } else {
         // If this commit does not want to close the file, persist blocks
-        // only if append is supported or we're explicitly told to
         dir.persistBlocks(src, pendingFile);
       }
     } finally {
@@ -4479,9 +4479,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     } finally {
       writeUnlock();
     }
-    if (supportAppends || persistBlocks) {
-      getEditLog().logSync();
-    }
+    getEditLog().logSync();
     LOG.info("updatePipeline(" + oldBlock + ") successfully to " + newBlock);
   }
@@ -4520,12 +4518,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     }
     blockinfo.setExpectedLocations(descriptors);
 
-    // persist blocks only if append is supported
     String src = leaseManager.findPath(pendingFile);
-    if (supportAppends) {
-      dir.persistBlocks(src, pendingFile);
-    }
+    dir.persistBlocks(src, pendingFile);
   }
 
   // rename was successful. If any part of the renamed subtree had
   // files that were being written to, update with new filename.

View File: hdfs-default.xml

@@ -836,4 +836,12 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.support.append</name>
+  <value>true</value>
+  <description>
+    Does HDFS allow appends to files?
+  </description>
+</property>
+
 </configuration>
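
With dfs.support.append now defaulting to true in hdfs-default.xml, the explicit per-test overrides removed in the files below become redundant. A deployment or test that still wants the old behavior can opt out by overriding the key. A minimal sketch along the lines of the test setup in this commit (the class name and single-datanode cluster are illustrative, not part of the change):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class AppendOptOutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // The default is now true; set it to false only to disable append.
        conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, false);
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
          cluster.waitActive();
          // append() against this cluster now fails with the
          // UnsupportedOperationException added in FSNamesystem above.
        } finally {
          cluster.shutdown();
        }
      }
    }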

View File: TestFiPipelines.java

@@ -230,7 +230,6 @@ public class TestFiPipelines {
     conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
     conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, customBlockSize / 2);
-    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
     conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 0);
   }

View File: TestStickyBit.java

@@ -163,7 +163,6 @@ public class TestStickyBit extends TestCase {
     try {
       Configuration conf = new HdfsConfiguration();
       conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
-      conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
       FileSystem hdfs = cluster.getFileSystem();

View File: FileAppendTest4.java

@@ -55,7 +55,6 @@ public class FileAppendTest4 {
     conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_CHECKSUM);
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, PACKET_SIZE);
-    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
   }
 
   @BeforeClass

View File: TestClientProtocolForPipelineRecovery.java

@@ -40,7 +40,6 @@ public class TestClientProtocolForPipelineRecovery {
   @Test public void testGetNewStamp() throws IOException {
     int numDataNodes = 1;
     Configuration conf = new HdfsConfiguration();
-    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
     try {
       cluster.waitActive();

View File: TestDataTransferProtocol.java

@@ -207,7 +207,6 @@ public class TestDataTransferProtocol extends TestCase {
   @Test public void testOpWrite() throws IOException {
     int numDataNodes = 1;
     Configuration conf = new HdfsConfiguration();
-    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
     try {
       cluster.waitActive();

View File: TestFileAppend2.java

@@ -85,7 +85,6 @@ public class TestFileAppend2 extends TestCase {
       SimulatedFSDataset.setFactory(conf);
     }
     conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
-    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
@@ -338,7 +337,6 @@ public class TestFileAppend2 extends TestCase {
     conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 30000);
     conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, 30000);
     conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
-    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(numDatanodes)

View File: TestFileAppend3.java

@@ -70,7 +70,6 @@ public class TestFileAppend3 extends junit.framework.TestCase {
     AppendTestUtil.LOG.info("setUp()");
     conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
-    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
     buffersize = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
     fs = (DistributedFileSystem)cluster.getFileSystem();

View File: TestFileAppend4.java

@@ -79,7 +79,6 @@ public class TestFileAppend4 {
     if (simulatedStorage) {
       SimulatedFSDataset.setFactory(conf);
     }
-    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
 
     // lower heartbeat interval for fast recognition of DN death
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,

View File: TestFileConcurrentReader.java

@@ -311,7 +311,6 @@ public class TestFileConcurrentReader extends junit.framework.TestCase {
       final int writeSize,
       Configuration conf
   ) throws IOException {
-    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, syncType == SyncType.APPEND);
     conf.setBoolean(DFSConfigKeys.DFS_DATANODE_TRANSFERTO_ALLOWED_KEY,
         transferToAllowed);
     init(conf);

View File: TestFileCreationDelete.java

@@ -43,7 +43,6 @@ public class TestFileCreationDelete extends junit.framework.TestCase {
     conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
-    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
 
     // create cluster
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

View File: TestLeaseRecovery.java

@@ -70,7 +70,6 @@ public class TestLeaseRecovery extends junit.framework.TestCase {
     final int ORG_FILE_SIZE = 3000;
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
-    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
     MiniDFSCluster cluster = null;
     try {

View File: TestPipelines.java

@@ -155,7 +155,6 @@ public class TestPipelines {
     conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
     conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, customBlockSize / 2);
-    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
     conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 0);
   }

View File: TestQuota.java

@@ -82,7 +82,6 @@ public class TestQuota {
     // Space quotas
     final int DEFAULT_BLOCK_SIZE = 512;
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
-    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     final FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
@@ -531,7 +530,6 @@ public class TestQuota {
     // set a smaller block size so that we can test with smaller
     // diskspace quotas
     conf.set(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, "512");
-    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     final FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),

View File: TestReadWhileWriting.java

@@ -56,8 +56,6 @@ public class TestReadWhileWriting {
   @Test
   public void pipeline_02_03() throws Exception {
     final Configuration conf = new HdfsConfiguration();
-    //enable append
-    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
     conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
 
     // create cluster

View File: TestRenameWhileOpen.java

@@ -54,7 +54,6 @@ public class TestRenameWhileOpen extends junit.framework.TestCase {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1);
-    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
 
     // create cluster
     System.out.println("Test 1*****************************");
@@ -140,7 +139,6 @@ public class TestRenameWhileOpen extends junit.framework.TestCase {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1);
-    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
 
     System.out.println("Test 2************************************");
     // create cluster
@@ -215,7+213,6 @@ public class TestRenameWhileOpen extends junit.framework.TestCase {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1);
-    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
 
     System.out.println("Test 3************************************");
     // create cluster
@@ -280,7+277,6 @@ public class TestRenameWhileOpen extends junit.framework.TestCase {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1);
-    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
 
     System.out.println("Test 4************************************");
     // create cluster

View File: TestBlockTokenWithDFS.java

@@ -182,8 +182,6 @@ public class TestBlockTokenWithDFS {
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes);
     conf.setInt("ipc.client.connect.max.retries", 0);
-    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
-        DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
     return conf;
   }

View File: TestDatanodeRestart.java

@@ -76,7 +76,6 @@ public class TestDatanodeRestart {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
     conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
-    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     cluster.waitActive();
     try {
@@ -141,7 +140,6 @@ public class TestDatanodeRestart {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
     conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
-    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     cluster.waitActive();
     try {