HDFS-6563. NameNode cannot save fsimage in certain circumstances when snapshots are in use. Contributed by Aaron T. Myers.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1603712 13f79535-47bb-0310-9956-ffa450edef68
parent a4e0ff5e05
commit 3f82484218
@@ -670,6 +670,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6553. Add missing DeprecationDeltas for NFS Kerberos configurations
     (Stephen Chu via brandonli)
 
+    HDFS-6563. NameNode cannot save fsimage in certain circumstances when
+    snapshots are in use. (atm)
+
   BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
 
     HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)
@@ -533,8 +533,10 @@ public final class FSImageFormatPBINode {
       INodeSection.INodeFile.Builder b = buildINodeFile(n,
           parent.getSaverContext());
 
-      for (Block block : n.getBlocks()) {
-        b.addBlocks(PBHelper.convert(block));
+      if (n.getBlocks() != null) {
+        for (Block block : n.getBlocks()) {
+          b.addBlocks(PBHelper.convert(block));
+        }
       }
 
       FileUnderConstructionFeature uc = n.getFileUnderConstructionFeature();
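The hunk above exists because, per the test added at the end of this commit, a file that is zero-length in a snapshot and later extended and deleted can leave behind an INodeFile whose block array is null, and Java's for-each over a null array throws a NullPointerException, aborting the fsimage save. A minimal sketch of that failure mode, using a hypothetical FileStub in place of the real INodeFile (illustrative only, not Hadoop code):

class FileStub {
  private final long[] blocks; // may be null, like a zero-length snapshotted file
  FileStub(long[] blocks) { this.blocks = blocks; }
  long[] getBlocks() { return blocks; }
}

public class NullBlocksDemo {
  public static void main(String[] args) {
    FileStub f = new FileStub(null);
    // Unguarded, like the old saver loop: throws NullPointerException.
    // for (long b : f.getBlocks()) { }

    // Guarded, mirroring the fix above: safe even when getBlocks() is null.
    if (f.getBlocks() != null) {
      for (long b : f.getBlocks()) {
        System.out.println(b);
      }
    }
  }
}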
@@ -159,7 +159,7 @@ public class FileWithSnapshotFeature implements INode.Feature {
     // resize the array.
     final BlockInfo[] newBlocks;
     if (n == 0) {
-      newBlocks = null;
+      newBlocks = BlockInfo.EMPTY_ARRAY;
    } else {
       newBlocks = new BlockInfo[n];
       System.arraycopy(oldBlocks, 0, newBlocks, 0, n);
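This second hunk complements the guard by having FileWithSnapshotFeature hand back the shared BlockInfo.EMPTY_ARRAY constant instead of null when the file shrinks to zero blocks, so callers that iterate the result need no null check at all. A small sketch of that null-versus-empty-array convention, where EMPTY is a hypothetical analogue of BlockInfo.EMPTY_ARRAY:

public class EmptyArrayDemo {
  // Shared zero-length constant; safe to hand to every caller, since a
  // zero-length array has no elements anyone could mutate.
  static final int[] EMPTY = {};

  // Keep the first n blocks, like the array resize in the hunk above.
  static int[] truncateTo(int[] old, int n) {
    if (n == 0) {
      return EMPTY; // not null: callers may iterate unconditionally
    }
    int[] copy = new int[n];
    System.arraycopy(old, 0, copy, 0, n);
    return copy;
  }

  public static void main(String[] args) {
    for (int b : truncateTo(new int[] {1, 2, 3}, 0)) {
      System.out.println(b); // loop body never runs, and nothing throws
    }
  }
}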
@@ -28,12 +28,14 @@ import static org.junit.Assert.fail;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
@@ -396,4 +398,39 @@ public class TestSnapshotBlocksMap {
     assertEquals(1, blks.length);
     assertEquals(BLOCKSIZE, blks[0].getNumBytes());
   }
+
+  /**
+   * Make sure that a delete of a non-zero-length file which results in a
+   * zero-length file in a snapshot works.
+   */
+  @Test
+  public void testDeletionOfLaterBlocksWithZeroSizeFirstBlock() throws Exception {
+    final Path foo = new Path("/foo");
+    final Path bar = new Path(foo, "bar");
+    final byte[] testData = "foo bar baz".getBytes();
+
+    // Create a zero-length file.
+    DFSTestUtil.createFile(hdfs, bar, 0, REPLICATION, 0L);
+    assertEquals(0, fsdir.getINode4Write(bar.toString()).asFile().getBlocks().length);
+
+    // Create a snapshot that includes that file.
+    SnapshotTestHelper.createSnapshot(hdfs, foo, "s0");
+
+    // Extend that file.
+    FSDataOutputStream out = hdfs.append(bar);
+    out.write(testData);
+    out.close();
+    INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
+    BlockInfo[] blks = barNode.getBlocks();
+    assertEquals(1, blks.length);
+    assertEquals(testData.length, blks[0].getNumBytes());
+
+    // Delete the file.
+    hdfs.delete(bar, true);
+
+    // Now make sure that the NN can still save an fsimage successfully.
+    cluster.getNameNode().getRpcServer().setSafeMode(
+        SafeModeAction.SAFEMODE_ENTER, false);
+    cluster.getNameNode().getRpcServer().saveNamespace();
+  }
 }
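Assuming a standard Hadoop source checkout, the new regression test can be run on its own with Maven's surefire test filter, e.g. "mvn test -Dtest=TestSnapshotBlocksMap" from the hadoop-hdfs-project/hadoop-hdfs module. Without the two fixes above, the saveNamespace() call at the end of the new test would hit the null block array and is expected to fail.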