merge -r 1440191:1440192 from trunk to branch-2 to fix HDFS-4288
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1440194 13f79535-47bb-0310-9956-ffa450edef68
Parent: 1cc2fcd3b2
Commit: 64c6dd8f5c
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2013,6 +2013,8 @@ Release 0.23.7 - UNRELEASED
 
   BUG FIXES
 
+    HDFS-4288. NN accepts incremental BR as IBR in safemode (daryn via kihwal)
+
 Release 0.23.6 - UNRELEASED
 
   INCOMPATIBLE CHANGES
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -83,11 +83,7 @@ public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
   DatanodeDescriptor getDatanode(int index) {
     assert this.triplets != null : "BlockInfo is not initialized";
     assert index >= 0 && index*3 < triplets.length : "Index is out of bound";
-    DatanodeDescriptor node = (DatanodeDescriptor)triplets[index*3];
-    assert node == null ||
-        DatanodeDescriptor.class.getName().equals(node.getClass().getName()) :
-        "DatanodeDescriptor is expected at " + index*3;
-    return node;
+    return (DatanodeDescriptor)triplets[index*3];
   }
 
   BlockInfo getPrevious(int index) {
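Note on the removed assertion: it compared exact runtime class names, which (presumably, given the Mockito-based tests added below) rejects subclasses such as Mockito spies of DatanodeDescriptor. A minimal, self-contained illustration with hypothetical class names, not part of the patch:

// Hypothetical example: a Mockito spy is a generated subclass, so an
// exact class-name equality check fails for it.
import static org.mockito.Mockito.spy;

public class ExactClassCheckDemo {
  static class Node { }

  public static void main(String[] args) {
    Node spied = spy(new Node());
    // Prints something like "ExactClassCheckDemo$Node$MockitoMock$..."
    System.out.println(spied.getClass().getName());
    // The old BlockInfo assert used this style of check, which a spy fails:
    System.out.println(
        Node.class.getName().equals(spied.getClass().getName())); // false
  }
}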
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
@@ -1576,7 +1577,10 @@ public class BlockManager {
     }
 
     // Log the block report processing stats from Namenode perspective
-    NameNode.getNameNodeMetrics().addBlockReport((int) (endTime - startTime));
+    final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
+    if (metrics != null) {
+      metrics.addBlockReport((int) (endTime - startTime));
+    }
     blockLog.info("BLOCK* processReport: from "
         + nodeID + ", blocks: " + newReport.getNumberOfBlocks()
         + ", processing time: " + (endTime - startTime) + " msecs");
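The null guard matters because NameNode.getNameNodeMetrics() only returns a live handle once a full NameNode has initialized its metrics system; unit tests that drive BlockManager.processReport directly (like the ones added below) would otherwise hit a NullPointerException. A minimal sketch of the pattern, using stand-in classes rather than the real Hadoop ones:

// Self-contained sketch with hypothetical names: a static metrics handle
// that is only populated by full server startup stays null under test.
public class MetricsGuardExample {
  static class Metrics {
    void addBlockReport(int millis) {
      System.out.println("block report took " + millis + " ms");
    }
  }

  // In the real code this is set during NameNode initialization;
  // in a unit test it stays null.
  static Metrics metricsSingleton = null;

  static void recordReport(long startTime, long endTime) {
    Metrics metrics = metricsSingleton;     // may be null under test
    if (metrics != null) {                  // the guard added by the patch
      metrics.addBlockReport((int) (endTime - startTime));
    }
  }

  public static void main(String[] args) {
    recordReport(0, 42);                    // no NPE even without metrics
    metricsSingleton = new Metrics();
    recordReport(0, 42);                    // now actually records
  }
}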
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -551,6 +551,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   @Override
   public void updateRegInfo(DatanodeID nodeReg) {
     super.updateRegInfo(nodeReg);
+    firstBlockReport = true; // must re-process IBR after re-registration
   }
 
   /**
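firstBlockReport is the per-datanode flag at the heart of this fix: only a node's genuinely-first full block report should get the cheap safe-mode treatment, and re-registration must reset the flag. A hedged sketch of the consuming logic follows; isFirstBlockReport(), numBlocks(), receivedBlockReport(), and updateRegInfo() mirror real DatanodeDescriptor methods exercised by the tests below, but the gating expression itself is an illustration of the intent, not the verbatim BlockManager code:

// Assumption-labeled sketch with stand-in types.
public class SafeModeReportGate {
  static class Node {
    boolean firstBlockReport = true;
    int blocks = 0;

    boolean isFirstBlockReport() { return firstBlockReport; }
    int numBlocks() { return blocks; }
    void receivedBlockReport() { firstBlockReport = false; }
    void updateRegInfo() { firstBlockReport = true; } // reset on re-register
  }

  // Only a genuinely-first report from a node with no blocks already
  // delivered (e.g. via incremental block reports) may take the cheap
  // startup-safemode path.
  static boolean mayShortCircuit(Node node, boolean inStartupSafeMode) {
    return inStartupSafeMode
        && node.isFirstBlockReport()
        && node.numBlocks() == 0;
  }

  public static void main(String[] args) {
    Node n = new Node();
    System.out.println(mayShortCircuit(n, true));  // true: first report
    n.receivedBlockReport();
    System.out.println(mayShortCircuit(n, true));  // false: already reported
    n.updateRegInfo();
    System.out.println(mayShortCircuit(n, true));  // true again after restart
  }
}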
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -406,7 +406,7 @@ public class DatanodeManager {
   }
 
   /** Add a datanode. */
-  private void addDatanode(final DatanodeDescriptor node) {
+  void addDatanode(final DatanodeDescriptor node) {
     // To keep host2DatanodeMap consistent with datanodeMap,
     // remove from host2DatanodeMap the datanodeDescriptor removed
     // from datanodeMap before adding node to host2DatanodeMap.
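addDatanode goes from private to package-private purely so that TestBlockManager, which lives in the same org.apache.hadoop.hdfs.server.blockmanagement package, can swap a Mockito spy into the manager's maps, as the "// swap in spy" line in the test below shows. A toy illustration of the access rule, with made-up names:

// Toy example: a package-private method is invisible outside its package
// but freely callable by a test class in the same package.
class Manager {
  void addNode(String node) {               // package-private, like addDatanode
    System.out.println("registered " + node);
  }
}

class ManagerTest {                         // same package, so direct access OK
  public static void main(String[] args) {
    new Manager().addNode("spy-node");
  }
}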
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -34,13 +34,16 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.net.NetworkTopology;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
+import static org.mockito.Mockito.*;
 
 import com.google.common.base.Joiner;
 import com.google.common.collect.ImmutableList;
@@ -485,4 +488,70 @@ public class TestBlockManager {
         new NumberReplicas(),
         UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY));
   }
+
+  @Test
+  public void testSafeModeIBR() throws Exception {
+    DatanodeDescriptor node = spy(nodes.get(0));
+    node.setStorageID("dummy-storage");
+    node.isAlive = true;
+
+    DatanodeRegistration nodeReg =
+        new DatanodeRegistration(node, null, null, "");
+
+    // pretend to be in safemode
+    doReturn(true).when(fsn).isInStartupSafeMode();
+
+    // register new node
+    bm.getDatanodeManager().registerDatanode(nodeReg);
+    bm.getDatanodeManager().addDatanode(node); // swap in spy
+    assertEquals(node, bm.getDatanodeManager().getDatanode(node));
+    assertTrue(node.isFirstBlockReport());
+    // send block report, should be processed
+    reset(node);
+    bm.processReport(node, "pool", new BlockListAsLongs(null, null));
+    verify(node).receivedBlockReport();
+    assertFalse(node.isFirstBlockReport());
+    // send block report again, should NOT be processed
+    reset(node);
+    bm.processReport(node, "pool", new BlockListAsLongs(null, null));
+    verify(node, never()).receivedBlockReport();
+    assertFalse(node.isFirstBlockReport());
+
+    // re-register as if node restarted, should update existing node
+    bm.getDatanodeManager().removeDatanode(node);
+    reset(node);
+    bm.getDatanodeManager().registerDatanode(nodeReg);
+    verify(node).updateRegInfo(nodeReg);
+    assertTrue(node.isFirstBlockReport()); // ready for report again
+    // send block report, should be processed after restart
+    reset(node);
+    bm.processReport(node, "pool", new BlockListAsLongs(null, null));
+    verify(node).receivedBlockReport();
+    assertFalse(node.isFirstBlockReport());
+  }
+
+  @Test
+  public void testSafeModeIBRAfterIncremental() throws Exception {
+    DatanodeDescriptor node = spy(nodes.get(0));
+    node.setStorageID("dummy-storage");
+    node.isAlive = true;
+
+    DatanodeRegistration nodeReg =
+        new DatanodeRegistration(node, null, null, "");
+
+    // pretend to be in safemode
+    doReturn(true).when(fsn).isInStartupSafeMode();
+
+    // register new node
+    bm.getDatanodeManager().registerDatanode(nodeReg);
+    bm.getDatanodeManager().addDatanode(node); // swap in spy
+    assertEquals(node, bm.getDatanodeManager().getDatanode(node));
+    assertTrue(node.isFirstBlockReport());
+    // send block report while pretending to already have blocks
+    reset(node);
+    doReturn(1).when(node).numBlocks();
+    bm.processReport(node, "pool", new BlockListAsLongs(null, null));
+    verify(node).receivedBlockReport();
+    assertFalse(node.isFirstBlockReport());
+  }
 }
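To try the new tests locally, the usual surefire invocation should work from the hadoop-hdfs module (assuming the standard branch-2 source layout): mvn test -Dtest=TestBlockManager run from hadoop-hdfs-project/hadoop-hdfs.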