HDFS-9351. checkNNStartup() needs to be called when fsck calls FSNamesystem.getSnapshottableDirs(). (Xiao Chen via Yongjun Zhang)

This commit is contained in:
Yongjun Zhang 2015-11-03 17:16:17 -08:00
parent dac0463a4e
commit 194251c852
3 changed files with 11 additions and 21 deletions

View File

@ -2228,6 +2228,9 @@ Release 2.8.0 - UNRELEASED
HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in
commitBlock. (Chang Li via zhz)
HDFS-9351. checkNNStartup() need to be called when fsck calls
FSNamesystem.getSnapshottableDirs(). (Xiao Chen via Yongjun Zhang)
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES

View File

@ -6364,26 +6364,6 @@ List<String> listCorruptFileBlocksWithSnapshot(String path,
return list;
}
/**
 * Get the full paths of all directories currently marked snapshottable.
 * Non-superusers only see the snapshottable directories they own;
 * a superuser sees every snapshottable directory.
 *
 * @return the list of all current snapshottable directory paths
 * @see #getSnapshottableDirListing()
 * @throws IOException if the listing cannot be retrieved
 */
List<String> getSnapshottableDirs() throws IOException {
  final FSPermissionChecker pc = getFSDirectory().getPermissionChecker();
  // A null user asks the snapshot manager for the unrestricted listing.
  final String user = pc.isSuperUser() ? null : pc.getUser();
  final SnapshottableDirectoryStatus[] statuses =
      snapshotManager.getSnapshottableDirListing(user);
  final List<String> dirs = new ArrayList<String>();
  if (statuses != null) {
    for (SnapshottableDirectoryStatus status : statuses) {
      dirs.add(status.getFullPath().toString());
    }
  }
  return dirs;
}
@Override //NameNodeMXBean
public int getDistinctVersionCount() {
return blockManager.getDatanodeManager().getDatanodesSoftwareVersions()

View File

@ -60,6 +60,7 @@
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
@ -345,7 +346,13 @@ public void fsck() {
namenode.getNamesystem().logFsckEvent(path, remoteAddress);
if (snapshottableDirs != null) {
snapshottableDirs = namenode.getNamesystem().getSnapshottableDirs(); SnapshottableDirectoryStatus[] snapshotDirs =
namenode.getRpcServer().getSnapshottableDirListing();
if (snapshotDirs != null) {
for (SnapshottableDirectoryStatus dir : snapshotDirs) {
snapshottableDirs.add(dir.getFullPath().toString());
}
}
}
final HdfsFileStatus file = namenode.getRpcServer().getFileInfo(path);