HDFS-14081. hdfs dfsadmin -metasave metasave_test results NPE. Contributed by Shweta Yakkali.

Signed-off-by: Wei-Chiu Chuang <weichiu@apache.org>
Shweta Yakkali 2019-02-20 14:28:37 -08:00 committed by Wei-Chiu Chuang
parent f5b4e0f971
commit 1bea785020
5 changed files with 67 additions and 11 deletions
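The NPE reported in the JIRA is hit when metasave walks postponedMisreplicatedBlocks and asks the BlockManager for a block that has since been removed from the blocks map: getStoredBlock() returns null and the later dereference throws. The first hunk below guards dumpBlockMeta() against exactly that case. As a minimal, self-contained illustration of the guard pattern (BlockRegistry and its members here are hypothetical stand-ins, not Hadoop classes):

// Simplified sketch of the null guard added to dumpBlockMeta();
// BlockRegistry and its fields are illustrative only.
import java.io.PrintWriter;
import java.util.HashMap;
import java.util.Map;

class BlockRegistry {
  private final Map<Long, String> blocksMap = new HashMap<>();

  // May return null: a postponed misreplicated block can be removed from the
  // blocks map (e.g. its file was deleted) before metasave gets to it.
  String getStoredBlock(long blockId) {
    return blocksMap.get(blockId);
  }

  void dumpBlockMeta(long blockId, PrintWriter out) {
    String blockInfo = getStoredBlock(blockId);
    if (blockInfo == null) {
      // The fix: report the missing block and return instead of dereferencing null.
      out.println("Block blk_" + blockId + " is Null");
      return;
    }
    out.println("Block blk_" + blockId + ": " + blockInfo);
  }
}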


@@ -839,8 +839,13 @@ private void dumpBlockMeta(Block block, PrintWriter out) {
new ArrayList<DatanodeStorageInfo>();
NumberReplicas numReplicas = new NumberReplicas();
BlockInfo blockInfo = getStoredBlock(block);
if (blockInfo == null) {
out.println("Block "+ block + " is Null");
return;
}
// source node returned is not used
chooseSourceDatanodes(getStoredBlock(block), containingNodes,
chooseSourceDatanodes(blockInfo, containingNodes,
containingLiveReplicasNodes, numReplicas,
new ArrayList<Byte>(), LowRedundancyBlocks.LEVEL);
@@ -1765,8 +1770,8 @@ public void setPostponeBlocksFromFuture(boolean postpone) {
this.shouldPostponeBlocksFromFuture = postpone;
}
private void postponeBlock(Block blk) {
@VisibleForTesting
void postponeBlock(Block blk) {
postponedMisreplicatedBlocks.add(blk);
}


@@ -1768,10 +1768,10 @@ public BlocksWithLocations getBlocks(DatanodeID datanode, long size, long
void metaSave(String filename) throws IOException {
String operationName = "metaSave";
checkSuperuserPrivilege(operationName);
checkOperation(OperationCategory.UNCHECKED);
checkOperation(OperationCategory.READ);
writeLock();
try {
checkOperation(OperationCategory.UNCHECKED);
checkOperation(OperationCategory.READ);
File file = new File(System.getProperty("hadoop.log.dir"), filename);
PrintWriter out = new PrintWriter(new BufferedWriter(
new OutputStreamWriter(new FileOutputStream(file), Charsets.UTF_8)));


@@ -94,6 +94,7 @@
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolClientSideTranslatorPB;
import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolPB;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
import org.apache.hadoop.security.SecurityUtil;
@@ -1537,11 +1538,20 @@ public int metaSave(String[] argv, int idx) throws IOException {
nsId, ClientProtocol.class);
List<IOException> exceptions = new ArrayList<>();
for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
try{
try {
proxy.getProxy().metaSave(pathname);
System.out.println("Created metasave file " + pathname
+ " in the log directory of namenode " + proxy.getAddress());
} catch (IOException ioe){
} catch (RemoteException re) {
Exception unwrapped = re.unwrapRemoteException(
StandbyException.class);
if (unwrapped instanceof StandbyException) {
System.out.println("Skip Standby NameNode, since it cannot perform"
+ " metasave operation");
} else {
throw re;
}
} catch (IOException ioe) {
System.out.println("Created metasave file " + pathname
+ " in the log directory of namenode " + proxy.getAddress()
+ " failed");

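Taken together with the FSNamesystem hunk above (metaSave is now checked as a READ operation, which a Standby NameNode rejects with a StandbyException), the client-side handling can be exercised in isolation roughly as follows. This is a sketch assuming hadoop-common on the classpath; the class name and the simulated standby error text are illustrative, not taken from the patch.

// Standalone sketch of the unwrap-and-skip pattern used in DFSAdmin#metaSave.
import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.StandbyException;

public class UnwrapStandbyDemo {
  public static void main(String[] args) throws IOException {
    // Simulate what the RPC layer hands back when a Standby NameNode rejects the call.
    RemoteException re = new RemoteException(StandbyException.class.getName(),
        "Operation category READ is not supported in state standby");
    Exception unwrapped = re.unwrapRemoteException(StandbyException.class);
    if (unwrapped instanceof StandbyException) {
      // Same decision DFSAdmin now makes: skip the standby instead of failing.
      System.out.println("Skip Standby NameNode, since it cannot perform"
          + " metasave operation");
    } else {
      throw re;  // any other remote failure still propagates
    }
  }
}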

@@ -1478,6 +1478,41 @@ private void verifyPlacementPolicy(final MiniDFSCluster cluster,
}
}
/**
* Unit test to check the race condition of adding a Block to the
* postponedMisreplicatedBlocks set which may not be present in the BlockManager,
* thus avoiding a NullPointerException.
*/
@Test
public void testMetaSavePostponedMisreplicatedBlocks() throws IOException {
bm.postponeBlock(new Block());
File file = new File("test.log");
PrintWriter out = new PrintWriter(file);
bm.metaSave(out);
out.flush();
FileInputStream fstream = new FileInputStream(file);
DataInputStream in = new DataInputStream(fstream);
BufferedReader reader = new BufferedReader(new InputStreamReader(in));
StringBuffer buffer = new StringBuffer();
String line;
try {
while ((line = reader.readLine()) != null) {
buffer.append(line);
}
String output = buffer.toString();
assertTrue("Metasave output should not have null block ",
output.contains("Block blk_0_0 is Null"));
} finally {
reader.close();
file.delete();
}
}
@Test
public void testMetaSaveMissingReplicas() throws Exception {
List<DatanodeStorageInfo> origStorages = getStorages(0, 1);


@@ -414,16 +414,21 @@ public void testSetNegativeBalancerBandwidth() throws Exception {
@Test (timeout = 30000)
public void testMetaSave() throws Exception {
setUpHaCluster(false);
cluster.getDfsCluster().transitionToActive(0);
int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Created metasave file dfs.meta in the log directory"
+ " of namenode.*";
assertOutputMatches(message + newLine + message + newLine);
String messageFromActiveNN = "Created metasave file dfs.meta "
+ "in the log directory of namenode.*";
String messageFromStandbyNN = "Skip Standby NameNode, since it "
+ "cannot perform metasave operation";
assertOutputMatches(messageFromActiveNN + newLine +
messageFromStandbyNN + newLine);
}
@Test (timeout = 30000)
public void testMetaSaveNN1UpNN2Down() throws Exception {
setUpHaCluster(false);
cluster.getDfsCluster().transitionToActive(0);
cluster.getDfsCluster().shutdownNameNode(1);
int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
assertNotEquals(err.toString().trim(), 0, exitCode);
@@ -437,6 +442,7 @@ public void testMetaSaveNN1UpNN2Down() throws Exception {
@Test (timeout = 30000)
public void testMetaSaveNN1DownNN2Up() throws Exception {
setUpHaCluster(false);
cluster.getDfsCluster().transitionToActive(1);
cluster.getDfsCluster().shutdownNameNode(0);
int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
assertNotEquals(err.toString().trim(), 0, exitCode);