HDFS-8809. HDFS fsck reports under construction blocks as CORRUPT. Contributed by Jing Zhao.
(cherry picked from commit c8bca62718)
parent a0450b96a2
commit cd0c6b5789
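
Summary of the change: the fsck block scan in NamenodeFsck now skips the last block of a file that is still under construction. Such a block legitimately has fewer finalized replicas than the file's replication factor, so counting it made fsck report healthy open files as having missing or "CORRUPT" blocks. The sketch below only restates, in condensed form, the per-block guard visible in the NamenodeFsck diff further down; the surrounding counters and reporting are elided:

    // Condensed sketch of the new guard in the per-block loop (see the
    // NamenodeFsck hunk below for the full context).
    final LocatedBlock lastBlock = blocks.getLastLocatedBlock();
    for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
      ExtendedBlock block = lBlk.getBlock();
      if (!blocks.isLastBlockComplete() && lastBlock != null
          && lastBlock.getBlock().equals(block)) {
        // last block of an under-construction file: it is still being
        // written, so do not count it as missing or corrupt
        continue;
      }
      // ... existing accounting of replicas, corruption and mis-replication ...
    }
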
@@ -850,6 +850,8 @@ Release 2.8.0 - UNRELEASED

    HDFS-8922. Link the native_mini_dfs test library with libdl, since IBM Java
    requires it (Ayappan via Colin P. McCabe)

    HDFS-8809. HDFS fsck reports under construction blocks as "CORRUPT". (jing9)

Release 2.7.2 - UNRELEASED

  INCOMPATIBLE CHANGES

@@ -523,6 +523,9 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
      LocatedBlocks blocks) throws IOException {
    String path = file.getFullName(parent);
    boolean isOpen = blocks.isUnderConstruction();
    if (isOpen && !showOpenFiles) {
      return;
    }
    int missing = 0;
    int corrupt = 0;
    long missize = 0;

@@ -530,8 +533,15 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
    int misReplicatedPerFile = 0;
    StringBuilder report = new StringBuilder();
    int blockNumber = 0;
    final LocatedBlock lastBlock = blocks.getLastLocatedBlock();
    for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
      ExtendedBlock block = lBlk.getBlock();
      if (!blocks.isLastBlockComplete() && lastBlock != null &&
          lastBlock.getBlock().equals(block)) {
        // this is the last block and this is not complete. ignore it since
        // it is under construction
        continue;
      }
      BlockManager bm = namenode.getNamesystem().getBlockManager();

      final BlockInfo storedBlock = bm.getStoredBlock(
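
For context, showOpenFiles is the field behind fsck's -openforwrite option: without it the block summary above returns early for files that are under construction, and with it open files are listed but, after this change, their in-progress last block is no longer counted against them. A typical invocation looks like the following (the path is illustrative):

    $ hdfs fsck /user/example -openforwrite -files -blocks
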
@@ -65,6 +65,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;

@@ -601,6 +602,7 @@ public class TestFsck {
        out.write(randomString.getBytes());
        writeCount++;
      }
      ((DFSOutputStream) out.getWrappedStream()).hflush();
      // We expect the filesystem to be HEALTHY and show one open file
      outStr = runFsck(conf, 0, true, topDir);
      System.out.println(outStr);
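
For readers who want to try the scenario outside of TestFsck, a minimal standalone sketch follows. It mirrors the test above: write to a file, hflush so the data is visible while the file stays under construction, then run fsck and expect a HEALTHY report. The class name, path, data and the three-datanode cluster are illustrative assumptions, and DFSck is driven directly rather than through the test's runFsck helper.

    // Hypothetical reproduction of the HDFS-8809 scenario on a MiniDFSCluster.
    import java.io.ByteArrayOutputStream;
    import java.io.PrintStream;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSOutputStream;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.tools.DFSck;
    import org.apache.hadoop.util.ToolRunner;

    public class FsckOpenFileRepro {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        try {
          cluster.waitActive();
          DistributedFileSystem fs = cluster.getFileSystem();
          Path openFile = new Path("/openfile"); // illustrative path
          FSDataOutputStream out = fs.create(openFile);
          out.write("some data".getBytes());
          // hflush makes the data visible while the file stays under construction
          ((DFSOutputStream) out.getWrappedStream()).hflush();

          // Run fsck while the file is still open for write.
          ByteArrayOutputStream bytes = new ByteArrayOutputStream();
          PrintStream ps = new PrintStream(bytes);
          int rc = ToolRunner.run(new DFSck(conf, ps),
              new String[] {"/", "-openforwrite"});
          System.out.println(bytes.toString());
          // With this fix, rc is 0 and the report says HEALTHY; before it,
          // the under-construction last block could be counted as corrupt.
          out.close();
        } finally {
          cluster.shutdown();
        }
      }
    }
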