HDFS-14081. hdfs dfsadmin -metasave metasave_test results NPE. Contributed by Shweta Yakkali.
Signed-off-by: Wei-Chiu Chuang <weichiu@apache.org>
(cherry picked from commit 1bea785020)
commit 1ceefa726e
parent a21e2e4dbc
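
Why this patch: metaSave dumps metadata for every block in postponedMisreplicatedBlocks, but a block can be removed from the block map after it has been postponed. getStoredBlock(block) then returns null, and the old code passed that null straight into chooseSourceDatanodes(), producing the NullPointerException reported against hdfs dfsadmin -metasave. A minimal sketch of the trigger, assuming a BlockManager bm and a PrintWriter out wired up as in the new unit test below:

    bm.postponeBlock(new Block()); // postponed, but never added to the blocks map
    bm.metaSave(out);              // before this patch: NullPointerException;
                                   // after: prints "Block blk_0_0 is Null" and continues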
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -838,8 +838,13 @@ public class BlockManager implements BlockStatsMXBean {
         new ArrayList<DatanodeStorageInfo>();
 
     NumberReplicas numReplicas = new NumberReplicas();
+    BlockInfo blockInfo = getStoredBlock(block);
+    if (blockInfo == null) {
+      out.println("Block "+ block + " is Null");
+      return;
+    }
     // source node returned is not used
-    chooseSourceDatanodes(getStoredBlock(block), containingNodes,
+    chooseSourceDatanodes(blockInfo, containingNodes,
         containingLiveReplicasNodes, numReplicas,
         new LinkedList<Byte>(), LowRedundancyBlocks.LEVEL);
 
@@ -848,7 +853,7 @@ public class BlockManager implements BlockStatsMXBean {
     assert containingLiveReplicasNodes.size() >= numReplicas.liveReplicas();
     int usableReplicas = numReplicas.liveReplicas() +
         numReplicas.decommissionedAndDecommissioning();
 
     if (block instanceof BlockInfo) {
       BlockCollection bc = getBlockCollection((BlockInfo)block);
       String fileName = (bc == null) ? "[orphaned]" : bc.getName();
@@ -1764,8 +1769,8 @@ public class BlockManager implements BlockStatsMXBean {
     this.shouldPostponeBlocksFromFuture = postpone;
   }
 
-  private void postponeBlock(Block blk) {
+  @VisibleForTesting
+  void postponeBlock(Block blk) {
     postponedMisreplicatedBlocks.add(blk);
   }
 
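Two changes in BlockManager: the block-dump path now resolves the stored block once, bails out with a diagnostic line when it is gone, and reuses the resolved blockInfo instead of calling getStoredBlock() twice; postponeBlock() is relaxed from private to package-private and marked @VisibleForTesting so the new test can plant a postponed block directly. Roughly (a sketch of the assumed call shape, not the verbatim metaSave body), the postponed set feeds the per-block dump like this:

    // Sketch: each postponed block may have vanished from the blocks map by
    // the time metaSave iterates the set, hence the new null guard above.
    for (Block b : postponedMisreplicatedBlocks) {
      dumpBlockMeta(b, out); // now tolerates getStoredBlock(b) == null
    }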
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1756,10 +1756,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   void metaSave(String filename) throws IOException {
     String operationName = "metaSave";
     checkSuperuserPrivilege(operationName);
-    checkOperation(OperationCategory.UNCHECKED);
+    checkOperation(OperationCategory.READ);
     writeLock();
     try {
-      checkOperation(OperationCategory.UNCHECKED);
+      checkOperation(OperationCategory.READ);
       File file = new File(System.getProperty("hadoop.log.dir"), filename);
       PrintWriter out = new PrintWriter(new BufferedWriter(
           new OutputStreamWriter(new FileOutputStream(file), Charsets.UTF_8)));
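Reclassifying the operation from UNCHECKED to READ is what makes a standby NameNode reject metaSave instead of silently executing it. In effect (a simplified sketch of the standby-side check, not the real HAState/StandbyState code), the server behaves like:

    // Simplified sketch, assumed shape: UNCHECKED operations always pass,
    // while READ is refused on a standby that does not serve reads.
    void checkOperation(OperationCategory op) throws StandbyException {
      if (op != OperationCategory.UNCHECKED && !standbyReadsEnabled) {
        throw new StandbyException("Operation category " + op
            + " is not supported in state standby");
      }
    }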
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -94,6 +94,7 @@ import org.apache.hadoop.ipc.RefreshResponse;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolClientSideTranslatorPB;
 import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolPB;
+import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.RefreshUserMappingsProtocol;
 import org.apache.hadoop.security.SecurityUtil;
@@ -1537,11 +1538,20 @@ public class DFSAdmin extends FsShell {
         nsId, ClientProtocol.class);
     List<IOException> exceptions = new ArrayList<>();
     for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
-      try{
+      try {
         proxy.getProxy().metaSave(pathname);
         System.out.println("Created metasave file " + pathname
             + " in the log directory of namenode " + proxy.getAddress());
-      } catch (IOException ioe){
+      } catch (RemoteException re) {
+        Exception unwrapped = re.unwrapRemoteException(
+            StandbyException.class);
+        if (unwrapped instanceof StandbyException) {
+          System.out.println("Skip Standby NameNode, since it cannot perform"
+              + " metasave operation");
+        } else {
+          throw re;
+        }
+      } catch (IOException ioe) {
         System.out.println("Created metasave file " + pathname
             + " in the log directory of namenode " + proxy.getAddress()
             + " failed");
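On the client side the StandbyException arrives wrapped in a RemoteException, so DFSAdmin unwraps it and only skips the NameNode when the wrapped type really is a StandbyException; anything else is rethrown. A small demonstration of the unwrapping contract (RemoteException and StandbyException are the real org.apache.hadoop.ipc classes; the message text here is illustrative):

    RemoteException re = new RemoteException(StandbyException.class.getName(),
        "Operation category READ is not supported in state standby");
    // Returns a StandbyException because the wrapped class name matches one of
    // the lookup types; for any other wrapped class it returns re itself,
    // which the new catch block rethrows.
    Exception unwrapped = re.unwrapRemoteException(StandbyException.class);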
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -1478,6 +1478,41 @@ public class TestBlockManager {
     }
   }
 
+  /**
+   * Unit test to check the race condition for adding a Block to
+   * postponedMisreplicatedBlocks set which may not present in BlockManager
+   * thus avoiding NullPointerException.
+   **/
+  @Test
+  public void testMetaSavePostponedMisreplicatedBlocks() throws IOException {
+    bm.postponeBlock(new Block());
+
+    File file = new File("test.log");
+    PrintWriter out = new PrintWriter(file);
+
+    bm.metaSave(out);
+    out.flush();
+
+    FileInputStream fstream = new FileInputStream(file);
+    DataInputStream in = new DataInputStream(fstream);
+
+    BufferedReader reader = new BufferedReader(new InputStreamReader(in));
+    StringBuffer buffer = new StringBuffer();
+    String line;
+    try {
+      while ((line = reader.readLine()) != null) {
+        buffer.append(line);
+      }
+      String output = buffer.toString();
+      assertTrue("Metasave output should not have null block ",
+          output.contains("Block blk_0_0 is Null"));
+
+    } finally {
+      reader.close();
+      file.delete();
+    }
+  }
+
   @Test
   public void testMetaSaveMissingReplicas() throws Exception {
     List<DatanodeStorageInfo> origStorages = getStorages(0, 1);
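The new test plants a postponed block that was never added to the blocks map, runs metaSave, and asserts the diagnostic line appears (a default-constructed Block prints as blk_0_0, which is why the assertion looks for "Block blk_0_0 is Null"). The stream plumbing works, though the same read-back could be written more compactly; a sketch of an equivalent verification step, assuming a java.nio.file.Files import alongside the test's existing ones:

    // Equivalent check using java.nio instead of the DataInputStream chain.
    String output = String.join("", Files.readAllLines(file.toPath()));
    assertTrue("Metasave output should not have null block ",
        output.contains("Block blk_0_0 is Null"));
    file.delete();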
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
@@ -414,16 +414,21 @@ public class TestDFSAdminWithHA {
   @Test (timeout = 30000)
   public void testMetaSave() throws Exception {
     setUpHaCluster(false);
+    cluster.getDfsCluster().transitionToActive(0);
     int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
     assertEquals(err.toString().trim(), 0, exitCode);
-    String message = "Created metasave file dfs.meta in the log directory"
-        + " of namenode.*";
-    assertOutputMatches(message + newLine + message + newLine);
+    String messageFromActiveNN = "Created metasave file dfs.meta "
+        + "in the log directory of namenode.*";
+    String messageFromStandbyNN = "Skip Standby NameNode, since it "
+        + "cannot perform metasave operation";
+    assertOutputMatches(messageFromActiveNN + newLine +
+        messageFromStandbyNN + newLine);
   }
 
   @Test (timeout = 30000)
   public void testMetaSaveNN1UpNN2Down() throws Exception {
     setUpHaCluster(false);
+    cluster.getDfsCluster().transitionToActive(0);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
     assertNotEquals(err.toString().trim(), 0, exitCode);
@@ -437,6 +442,7 @@ public class TestDFSAdminWithHA {
   @Test (timeout = 30000)
   public void testMetaSaveNN1DownNN2Up() throws Exception {
     setUpHaCluster(false);
+    cluster.getDfsCluster().transitionToActive(1);
     cluster.getDfsCluster().shutdownNameNode(0);
     int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
     assertNotEquals(err.toString().trim(), 0, exitCode);
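
The HA tests now transition one NameNode to active explicitly, since a READ-category metaSave no longer succeeds when both nodes are standby. For the two-node testMetaSave case, the combined output is expected to look like this (the host:port value is illustrative):

    Created metasave file dfs.meta in the log directory of namenode nn1.example.com:8020
    Skip Standby NameNode, since it cannot perform metasave operation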