HDFS-6647. Edit log corruption when pipeline recovery occurs for deleted file present in snapshot. Contributed by Kihwal Lee.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1609543 13f79535-47bb-0310-9956-ffa450edef68
Kihwal Lee 2014-07-10 18:18:22 +00:00
parent cce02e3bef
commit 8f520386fb
3 changed files with 125 additions and 4 deletions
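
The failure mode, in brief: a pipeline recovery could run against a file that had been deleted from the current namespace but was still referenced by a snapshot, so the NameNode logged an OP_UPDATE_BLOCKS for the file after its OP_DELETE, and replaying that edit sequence on restart failed. A minimal sketch of the triggering sequence, annotated with the edit-log ops involved (names mirror the regression test added below and are illustrative; assumes a running MiniDFSCluster):

    // 1. Create a file and hflush so a block is allocated (OP_ADD).
    DFSOutputStream out =
        (DFSOutputStream) fs.create(file).getWrappedStream();
    out.write(1);
    out.hflush();
    // 2. Take a root snapshot so the snapshot references the file's inode.
    dfs.allowSnapshot(new Path("/"));
    dfs.createSnapshot(new Path("/"), "s1");
    // 3. Delete the file from the current namespace (OP_DELETE); the
    //    inode lives on in snapshot s1.
    fs.delete(file, true);
    // 4. Pipeline recovery for the file's last block reaches the NN;
    //    before this fix, checkUCBlock() did not notice the deletion, so
    //    updatePipeline() logged OP_UPDATE_BLOCKS for the deleted file,
    //    which could not be replayed after the OP_DELETE on restart.
    namenode.updatePipeline(clientName, oldBlock, newBlock,
        newLocatedBlock.getLocations(), newLocatedBlock.getStorageIDs());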

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -810,6 +810,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6631. TestPread#testHedgedReadLoopTooManyTimes fails intermittently.
     (Liang Xie via cnauroth)
 
+    HDFS-6647. Edit log corruption when pipeline recovery occurs for deleted
+    file present in snapshot (kihwal)
+
   BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
 
     HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -3101,10 +3101,9 @@ private INodeFile checkLease(String src, String holder, INode inode,
           : "Holder " + holder + " does not have any open files."));
     }
     // No further modification is allowed on a deleted file.
-    // A file is considered deleted, if it has no parent or is marked
+    // A file is considered deleted, if it is not in the inodeMap or is marked
     // as deleted in the snapshot feature.
-    if (file.getParent() == null || (file.isWithSnapshot() &&
-        file.getFileWithSnapshotFeature().isCurrentFileDeleted())) {
+    if (isFileDeleted(file)) {
       throw new FileNotFoundException(src);
     }
     String clientName = file.getFileUnderConstructionFeature().getClientName();
@@ -6251,6 +6250,16 @@ private long nextBlockId() throws IOException {
     return blockId;
   }
 
+  private boolean isFileDeleted(INodeFile file) {
+    // Not in the inodeMap or in the snapshot but marked deleted.
+    if (dir.getInode(file.getId()) == null ||
+        file.getParent() == null || (file.isWithSnapshot() &&
+        file.getFileWithSnapshotFeature().isCurrentFileDeleted())) {
+      return true;
+    }
+    return false;
+  }
+
   private INodeFile checkUCBlock(ExtendedBlock block,
       String clientName) throws IOException {
     assert hasWriteLock();
@@ -6267,7 +6276,7 @@ private INodeFile checkUCBlock(ExtendedBlock block,
     // check file inode
     final INodeFile file = ((INode)storedBlock.getBlockCollection()).asFile();
-    if (file == null || !file.isUnderConstruction()) {
+    if (file == null || !file.isUnderConstruction() || isFileDeleted(file)) {
       throw new IOException("The file " + storedBlock +
           " belonged to does not exist or it is not under construction.");
     }
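
Why the check lands in two places (an inference from the diff, not stated in the commit message): checkLease() covers path-based operations, where the file is resolved through the namespace, while checkUCBlock() serves block-based RPCs such as updatePipeline() and updateBlockForPipeline(), which locate the INodeFile through the blocks map and therefore never pass through path resolution. A file deleted from the current namespace but retained by a snapshot could previously slip through the block-based path unchecked.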

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestUpdatePipelineWithSnapshots.java

@@ -0,0 +1,109 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode.snapshot;

import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test;

public class TestUpdatePipelineWithSnapshots {

  // Regression test for HDFS-6647.
  @Test
  public void testUpdatePipelineAfterDelete() throws Exception {
    Configuration conf = new HdfsConfiguration();
    Path file = new Path("/test-file");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      FileSystem fs = cluster.getFileSystem();
      NamenodeProtocols namenode = cluster.getNameNodeRpc();
      DFSOutputStream out = null;
      try {
        // Create a file and make sure a block is allocated for it.
        out = (DFSOutputStream)(fs.create(file).
            getWrappedStream());
        out.write(1);
        out.hflush();

        // Create a snapshot that includes the file.
        SnapshotTestHelper.createSnapshot((DistributedFileSystem) fs,
            new Path("/"), "s1");

        // Grab the block info of this file for later use.
        FSDataInputStream in = null;
        ExtendedBlock oldBlock = null;
        try {
          in = fs.open(file);
          oldBlock = DFSTestUtil.getAllBlocks(in).get(0).getBlock();
        } finally {
          IOUtils.closeStream(in);
        }

        // Allocate a new block ID/gen stamp so we can simulate pipeline
        // recovery.
        String clientName = ((DistributedFileSystem)fs).getClient()
            .getClientName();
        LocatedBlock newLocatedBlock = namenode.updateBlockForPipeline(
            oldBlock, clientName);
        ExtendedBlock newBlock = new ExtendedBlock(oldBlock.getBlockPoolId(),
            oldBlock.getBlockId(), oldBlock.getNumBytes(),
            newLocatedBlock.getBlock().getGenerationStamp());
        // Delete the file from the present FS. It will still exist in the
        // previously-created snapshot. This will log an OP_DELETE for the
        // file in question.
        fs.delete(file, true);

        // Simulate a pipeline recovery, wherein a new block is allocated
        // for the existing block, resulting in an OP_UPDATE_BLOCKS being
        // logged for the file in question.
        try {
          namenode.updatePipeline(clientName, oldBlock, newBlock,
              newLocatedBlock.getLocations(), newLocatedBlock.getStorageIDs());
        } catch (IOException ioe) {
          // normal: the NN now rejects the update for the deleted file.
          assertExceptionContains(
              "does not exist or it is not under construction", ioe);
        }

        // Make sure the NN can restart with the edit logs as we have them now.
        cluster.restartNameNode(true);
      } finally {
        IOUtils.closeStream(out);
      }
    } finally {
      cluster.shutdown();
    }
  }
}
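
To run just this regression test, Surefire's single-test filter should work from the hadoop-hdfs-project/hadoop-hdfs module, e.g. mvn test -Dtest=TestUpdatePipelineWithSnapshots (assuming a normal Maven build environment for the Hadoop source tree).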