HDFS-5466. Update storage IDs when the pipeline is updated. (Contributed by szetszwo)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1539203 13f79535-47bb-0310-9956-ffa450edef68
parent d2b7b6589d
commit 3e3fea7cd4
@@ -71,5 +71,8 @@ IMPROVEMENTS:
     HDFS-5455. NN should update storageMap on first heartbeat. (Arpit Agarwal)
 
     HDFS-5457. Fix TestDatanodeRegistration, TestFsck and TestAddBlockRetry.
-    (Contributed bt szetszwo)
+    (Contributed by szetszwo)
+
+    HDFS-5466. Update storage IDs when the pipeline is updated. (Contributed
+    by szetszwo)
 
@@ -402,8 +402,7 @@ private DataStreamer(LocatedBlock lastBlock, HdfsFileStatus stat,
       }
 
       // setup pipeline to append to the last block XXX retries??
-      nodes = lastBlock.getLocations();
-      storageIDs = lastBlock.getStorageIDs();
+      setPipeline(lastBlock);
       errorIndex = -1; // no errors yet.
       if (nodes.length < 1) {
         throw new IOException("Unable to retrieve blocks locations " +
@@ -412,6 +411,14 @@ private DataStreamer(LocatedBlock lastBlock, HdfsFileStatus stat,
 
       }
     }
 
+    private void setPipeline(LocatedBlock lb) {
+      setPipeline(lb.getLocations(), lb.getStorageIDs());
+    }
+    private void setPipeline(DatanodeInfo[] nodes, String[] storageIDs) {
+      this.nodes = nodes;
+      this.storageIDs = storageIDs;
+    }
+
     private void setFavoredNodes(String[] favoredNodes) {
       this.favoredNodes = favoredNodes;
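Note: nodes and storageIDs are parallel arrays describing the write pipeline, with storageIDs[i] naming the chosen storage on nodes[i]. The setPipeline overloads added above make one method the only place both fields are assigned, so every later hunk that used to write nodes directly now updates the storage IDs at the same time. A minimal standalone sketch of the invariant (simplified names, not the actual HDFS classes):

    // Sketch only: String stands in for DatanodeInfo, and the class is a
    // stand-in for DFSOutputStream.DataStreamer's pipeline fields.
    class PipelineSketch {
      private String[] nodes;       // pipeline datanodes
      private String[] storageIDs;  // storageIDs[i] belongs to nodes[i]

      void setPipeline(String[] nodes, String[] storageIDs) {
        // Single choke point: neither array can change without the other.
        this.nodes = nodes;
        this.storageIDs = storageIDs;
      }
    }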
@@ -435,7 +442,7 @@ private void endBlock() {
       this.setName("DataStreamer for file " + src);
       closeResponder();
       closeStream();
-      nodes = null;
+      setPipeline(null, null);
       stage = BlockConstructionStage.PIPELINE_SETUP_CREATE;
     }
 
@@ -504,7 +511,7 @@ public void run() {
           if(DFSClient.LOG.isDebugEnabled()) {
             DFSClient.LOG.debug("Allocating new block");
           }
-          nodes = nextBlockOutputStream();
+          setPipeline(nextBlockOutputStream());
           initDataStreaming();
         } else if (stage == BlockConstructionStage.PIPELINE_SETUP_APPEND) {
           if(DFSClient.LOG.isDebugEnabled()) {
@@ -912,7 +919,7 @@ private void addDatanode2ExistingPipeline() throws IOException {
           src, block, nodes, storageIDs,
           failed.toArray(new DatanodeInfo[failed.size()]),
           1, dfsClient.clientName);
-      nodes = lb.getLocations();
+      setPipeline(lb);
 
       //find the new datanode
       final int d = findNewDatanode(original);
@@ -1012,7 +1019,14 @@ private boolean setupPipelineForAppendOrRecovery() throws IOException {
         System.arraycopy(nodes, 0, newnodes, 0, errorIndex);
         System.arraycopy(nodes, errorIndex+1, newnodes, errorIndex,
             newnodes.length-errorIndex);
-        nodes = newnodes;
+
+        final String[] newStorageIDs = new String[newnodes.length];
+        System.arraycopy(storageIDs, 0, newStorageIDs, 0, errorIndex);
+        System.arraycopy(storageIDs, errorIndex+1, newStorageIDs, errorIndex,
+            newStorageIDs.length-errorIndex);
+
+        setPipeline(newnodes, newStorageIDs);
+
       hasError = false;
       lastException.set(null);
       errorIndex = -1;
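Note: this hunk removes the failed datanode's entry from both parallel arrays instead of only from nodes. The pair of System.arraycopy calls is the standard remove-at-index idiom: copy the prefix before errorIndex, then the suffix after it. A runnable sketch of the idiom (hypothetical helper name, not part of HDFS):

    import java.util.Arrays;

    // removeAt(new String[] {"s1", "s2", "s3"}, 1) prints [s1, s3].
    public class RemoveAtSketch {
      static String[] removeAt(String[] src, int index) {
        final String[] dst = new String[src.length - 1];
        System.arraycopy(src, 0, dst, 0, index);      // elements before index
        System.arraycopy(src, index + 1, dst, index,  // elements after index
            dst.length - index);
        return dst;
      }

      public static void main(String[] args) {
        System.out.println(Arrays.toString(
            removeAt(new String[] {"s1", "s2", "s3"}, 1)));
      }
    }

Applying to storageIDs the same copy that was already applied to nodes is the heart of HDFS-5466: previously only nodes was replaced, leaving the storage-ID array stale and misaligned after a pipeline node was dropped.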
@@ -1051,7 +1065,7 @@ private boolean setupPipelineForAppendOrRecovery() throws IOException {
      * Must get block ID and the IDs of the destinations from the namenode.
      * Returns the list of target datanodes.
      */
-    private DatanodeInfo[] nextBlockOutputStream() throws IOException {
+    private LocatedBlock nextBlockOutputStream() throws IOException {
       LocatedBlock lb = null;
       DatanodeInfo[] nodes = null;
       int count = dfsClient.getConf().nBlockWriteRetry;
@@ -1093,7 +1107,7 @@ private DatanodeInfo[] nextBlockOutputStream() throws IOException {
       if (!success) {
         throw new IOException("Unable to create new block.");
       }
-      return nodes;
+      return lb;
     }
 
     // connects to the first datanode in the pipeline
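Note: the two hunks above widen nextBlockOutputStream() to return the namenode's whole LocatedBlock rather than just its DatanodeInfo[] locations. That is what lets the caller in run() write setPipeline(nextBlockOutputStream()): the LocatedBlock carries the storage IDs alongside the locations, and returning only the datanode array would discard them. A condensed, hypothetical view of the shape involved (not the real LocatedBlock class):

    // Sketch: the namenode's reply bundles the two arrays the pipeline needs.
    class LocatedBlockSketch {
      final String[] locations;   // stands in for DatanodeInfo[]
      final String[] storageIDs;  // storage chosen on each datanode

      LocatedBlockSketch(String[] locations, String[] storageIDs) {
        this.locations = locations;
        this.storageIDs = storageIDs;
      }
    }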
@@ -1620,7 +1620,6 @@ public void processReport(final DatanodeID nodeID,
     // To minimize startup time, we discard any second (or later) block reports
     // that we receive while still in startup phase.
     final DatanodeStorageInfo storageInfo = node.updateStorage(storage);
-    LOG.info("XXX storageInfo=" + storageInfo + ", storage=" + storage);
     if (namesystem.isInStartupSafeMode()
         && storageInfo.getBlockReportCount() > 0) {
       blockLog.info("BLOCK* processReport: "