svn merge -c 1324892 from trunk for HDFS-3179. Improve the exception message thrown by DataStreamer when it failed to add a datanode.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1324894 13f79535-47bb-0310-9956-ffa450edef68
commit 415e7fecb1 (parent 50c13a44af)
CHANGES.txt
@@ -241,6 +241,9 @@ Release 2.0.0 - UNRELEASED
 
     HDFS-3249. Use ToolRunner.confirmPrompt in NameNode (todd)
 
+    HDFS-3179. Improve the exception message thrown by DataStreamer when
+    it failed to add a datanode.  (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-2477. Optimize computing the diff between a block report and the
DFSOutputStream.java
@@ -128,7 +128,7 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
   private volatile boolean appendChunk = false; // appending to existing partial block
   private long initialFileSize = 0; // at time of file open
   private Progressable progress;
-  private short blockReplication; // replication factor of file
+  private final short blockReplication; // replication factor of file
 
   private class Packet {
     long seqno; // sequencenumber of buffer in block
@@ -775,9 +775,13 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
     private int findNewDatanode(final DatanodeInfo[] original
         ) throws IOException {
       if (nodes.length != original.length + 1) {
-        throw new IOException("Failed to add a datanode:"
-            + " nodes.length != original.length + 1, nodes="
-            + Arrays.asList(nodes) + ", original=" + Arrays.asList(original));
+        throw new IOException("Failed to add a datanode. "
+            + "User may turn off this feature by setting "
+            + DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY
+            + " in configuration, where the current policy is "
+            + dfsClient.dtpReplaceDatanodeOnFailure
+            + ". (Nodes: current=" + Arrays.asList(nodes)
+            + ", original=" + Arrays.asList(original) + ")");
       }
       for(int i = 0; i < nodes.length; i++) {
         int j = 0;
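The rewritten message steers the user toward the replace-datanode-on-failure policy rather than the internal length check. As a minimal sketch of acting on that advice (an illustration, not part of this commit; the key strings are the assumed values behind the DFSConfigKeys constants on branch-2):

import org.apache.hadoop.conf.Configuration;

public class ReplaceDatanodePolicyExample {
  public static void main(String[] args) {
    final Configuration conf = new Configuration();
    // Assumed key name (value of
    // DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_KEY):
    // turn the feature off entirely...
    conf.setBoolean(
        "dfs.client.block.write.replace-datanode-on-failure.enable", false);
    // ...or keep it enabled but never ask for a replacement datanode
    // (assumed key name, the one the new exception message prints):
    conf.set(
        "dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
    System.out.println("policy = " + conf.get(
        "dfs.client.block.write.replace-datanode-on-failure.policy"));
  }
}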
TestReplaceDatanodeOnFailure.java
@@ -25,6 +25,7 @@ import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -234,4 +235,56 @@ public class TestReplaceDatanodeOnFailure {
       Assert.assertEquals(REPLICATION, dfsout.getNumCurrentReplicas());
     }
   }
+
+  @Test
+  public void testAppend() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    final short REPLICATION = (short)3;
+
+    Assert.assertEquals(ReplaceDatanodeOnFailure.DEFAULT, ReplaceDatanodeOnFailure.get(conf));
+
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf
+        ).numDataNodes(1).build();
+
+    try {
+      final DistributedFileSystem fs = (DistributedFileSystem)cluster.getFileSystem();
+      final Path f = new Path(DIR, "testAppend");
+
+      {
+        LOG.info("create an empty file " + f);
+        fs.create(f, REPLICATION).close();
+        final FileStatus status = fs.getFileStatus(f);
+        Assert.assertEquals(REPLICATION, status.getReplication());
+        Assert.assertEquals(0L, status.getLen());
+      }
+
+
+      final byte[] bytes = new byte[1000];
+      {
+        LOG.info("append " + bytes.length + " bytes to " + f);
+        final FSDataOutputStream out = fs.append(f);
+        out.write(bytes);
+        out.close();
+
+        final FileStatus status = fs.getFileStatus(f);
+        Assert.assertEquals(REPLICATION, status.getReplication());
+        Assert.assertEquals(bytes.length, status.getLen());
+      }
+
+      {
+        LOG.info("append another " + bytes.length + " bytes to " + f);
+        try {
+          final FSDataOutputStream out = fs.append(f);
+          out.write(bytes);
+          out.close();
+
+          Assert.fail();
+        } catch(IOException ioe) {
+          LOG.info("This exception is expected", ioe);
+        }
+      }
+    } finally {
+      if (cluster != null) {cluster.shutdown();}
+    }
+  }
 }
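Why the test expects only the second append to fail: the mini cluster runs a single datanode while the file requests replication 3, so appending to the existing partially-filled block goes through the datanode-replacement path under the default policy, and with no spare datanode available findNewDatanode throws the improved IOException above. A minimal sketch of checking that such an exception actually names the policy key (hypothetical, not part of this commit; the key string is the assumed value of DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY):

import java.io.IOException;

public class PolicyKeyMessageCheck {
  // Assumed value of
  // DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY.
  static final String POLICY_KEY =
      "dfs.client.block.write.replace-datanode-on-failure.policy";

  // Fails if the exception message does not steer the user to the setting.
  static void assertMentionsPolicyKey(IOException ioe) {
    final String m = ioe.getMessage();
    if (m == null || !m.contains(POLICY_KEY)) {
      throw new AssertionError("expected message to mention " + POLICY_KEY);
    }
  }

  public static void main(String[] args) {
    // Sample message shaped like the one built in findNewDatanode above.
    assertMentionsPolicyKey(new IOException("Failed to add a datanode. "
        + "User may turn off this feature by setting " + POLICY_KEY
        + " in configuration, where the current policy is DEFAULT. (...)"));
    System.out.println("ok: message mentions " + POLICY_KEY);
  }
}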