svn merge -c 1416064 from trunk for HDFS-4248. Renaming directories may incorrectly remove the paths in leases under the tree.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1416066 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2012-12-01 22:41:17 +00:00
parent 85a5ad4bf9
commit 76029e926c
6 changed files with 112 additions and 42 deletions
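The change is easiest to see from the client side. The sketch below is my own illustration, not code from this patch (paths and cluster sizing are made up, mirroring the new TestLease case at the end of the diff): a file is open for write, so the NameNode tracks a lease keyed by the file's path, and then an ancestor directory is renamed. With this fix the lease follows the rename instead of being left on, or incorrectly removed from, the old path.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class LeaseAfterRenameRepro {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      FileSystem fs = cluster.getFileSystem();
      // Creating and writing the file gives this client a lease on /test-d/test-file.
      FSDataOutputStream out = fs.create(new Path("/test-d/test-file"));
      out.writeBytes("still being written");

      // Rename an ancestor directory while the file is under construction.
      fs.rename(new Path("/test-d"), new Path("/test-d-other"));

      // The lease should now be tracked under /test-d-other/test-file; before
      // this fix it could stay under the stale path and later be lost.
      out.close();
    } finally {
      cluster.shutdown();
    }
  }
}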


@@ -1738,6 +1738,9 @@ Release 0.23.6 - UNRELEASED
    HDFS-4247. saveNamespace should be tolerant of dangling lease (daryn)

    HDFS-4248. Renaming directories may incorrectly remove the paths in leases
    under the tree. (daryn via szetszwo)

Release 0.23.5 - UNRELEASED

  INCOMPATIBLE CHANGES


@@ -600,6 +600,8 @@ public class FSDirectory implements Closeable {
// update modification time of dst and the parent of src
srcInodes[srcInodes.length-2].setModificationTime(timestamp);
dstInodes[dstInodes.length-2].setModificationTime(timestamp);
// update moved leases with new filename
getFSNamesystem().unprotectedChangeLease(src, dst);
return true;
}
} finally {
@@ -757,6 +759,8 @@ public class FSDirectory implements Closeable {
}
srcInodes[srcInodes.length - 2].setModificationTime(timestamp);
dstInodes[dstInodes.length - 2].setModificationTime(timestamp);
// update moved lease with new filename
getFSNamesystem().unprotectedChangeLease(src, dst);
// Collect the blocks and remove the lease for previous dst
if (removedDst != null) {


@@ -32,7 +32,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
@@ -362,10 +361,8 @@ public class FSEditLogLoader {
}
case OP_RENAME_OLD: {
RenameOldOp renameOp = (RenameOldOp)op;
HdfsFileStatus dinfo = fsDir.getFileInfo(renameOp.dst, false);
fsDir.unprotectedRenameTo(renameOp.src, renameOp.dst,
renameOp.timestamp);
fsNamesys.unprotectedChangeLease(renameOp.src, renameOp.dst, dinfo);
break;
}
case OP_DELETE: {
@@ -435,11 +432,8 @@ public class FSEditLogLoader {
}
case OP_RENAME: {
RenameOp renameOp = (RenameOp)op;
HdfsFileStatus dinfo = fsDir.getFileInfo(renameOp.dst, false);
fsDir.unprotectedRenameTo(renameOp.src, renameOp.dst,
renameOp.timestamp, renameOp.options);
fsNamesys.unprotectedChangeLease(renameOp.src, renameOp.dst, dinfo);
break;
}
case OP_GET_DELEGATION_TOKEN: {


@@ -2569,15 +2569,15 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
if (isPermissionEnabled) {
//We should not be doing this. This is move() not renameTo().
//but for now,
//NOTE: yes, this is bad! it's assuming much lower level behavior
// of rewriting the dst
String actualdst = dir.isDir(dst)?
dst + Path.SEPARATOR + new Path(src).getName(): dst;
checkParentAccess(src, FsAction.WRITE);
checkAncestorAccess(actualdst, FsAction.WRITE);
}
HdfsFileStatus dinfo = dir.getFileInfo(dst, false);
if (dir.renameTo(src, dst)) {
unprotectedChangeLease(src, dst, dinfo); // update lease with new filename
return true;
}
return false;
@@ -2628,9 +2628,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
checkAncestorAccess(dst, FsAction.WRITE);
}
HdfsFileStatus dinfo = dir.getFileInfo(dst, false);
dir.renameTo(src, dst, options);
unprotectedChangeLease(src, dst, dinfo); // update lease with new filename
}
/**
@@ -4861,31 +4859,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
// rename was successful. If any part of the renamed subtree had
// files that were being written to, update with new filename.
void unprotectedChangeLease(String src, String dst, HdfsFileStatus dinfo) {
String overwrite;
String replaceBy;
void unprotectedChangeLease(String src, String dst) {
assert hasWriteLock();
boolean destinationExisted = true;
if (dinfo == null) {
destinationExisted = false;
}
if (destinationExisted && dinfo.isDir()) {
Path spath = new Path(src);
Path parent = spath.getParent();
if (parent.isRoot()) {
overwrite = parent.toString();
} else {
overwrite = parent.toString() + Path.SEPARATOR;
}
replaceBy = dst + Path.SEPARATOR;
} else {
overwrite = src;
replaceBy = dst;
}
leaseManager.changeLease(src, dst, overwrite, replaceBy);
leaseManager.changeLease(src, dst);
}
/**

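To see how the removed unprotectedChangeLease heuristic above could diverge from the actual rename result, here is a worked example of my own (not code from the patch), using the paths exercised by the new test: an Options.Rename.OVERWRITE rename of /test-d-other onto an existing, empty directory /test-d while /test-d-other/test-file is open for write. Because dst existed and was a directory, the old code stripped only the parent of src and grafted the remainder onto dst, which does not match where the file actually lands.

public class ChangeLeaseComparison {
  public static void main(String[] args) {
    String src = "/test-d-other";
    String dst = "/test-d";                        // existed before the rename, and is a directory
    String oldLeasePath = "/test-d-other/test-file";

    // Removed heuristic: overwrite = parent of src (here the root "/"),
    // replaceBy = dst + "/".
    String overwrite = "/";
    String replaceBy = dst + "/";
    String heuristic = replaceBy + oldLeasePath.substring(overwrite.length());
    System.out.println(heuristic);                 // /test-d/test-d-other/test-file

    // An OVERWRITE rename replaces dst rather than moving src into it, so the
    // file really ends up at /test-d/test-file. Substituting the actual src
    // prefix, as the simplified changeLease(src, dst) in the next hunk does,
    // matches that.
    String substituted = dst + oldLeasePath.substring(src.length());
    System.out.println(substituted);               // /test-d/test-file
  }
}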

@@ -331,22 +331,19 @@ public class LeaseManager {
}
}
synchronized void changeLease(String src, String dst,
String overwrite, String replaceBy) {
synchronized void changeLease(String src, String dst) {
if (LOG.isDebugEnabled()) {
LOG.debug(getClass().getSimpleName() + ".changelease: " +
" src=" + src + ", dest=" + dst +
", overwrite=" + overwrite +
", replaceBy=" + replaceBy);
" src=" + src + ", dest=" + dst);
}
final int len = overwrite.length();
final int len = src.length();
for(Map.Entry<String, Lease> entry
: findLeaseWithPrefixPath(src, sortedLeasesByPath).entrySet()) {
final String oldpath = entry.getKey();
final Lease lease = entry.getValue();
//overwrite must be a prefix of oldpath
final String newpath = replaceBy + oldpath.substring(len);
// replace stem of src with new destination
final String newpath = dst + oldpath.substring(len);
if (LOG.isDebugEnabled()) {
LOG.debug("changeLease: replacing " + oldpath + " with " + newpath);
}

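For reference, a minimal standalone sketch of the substitution that the new changeLease(src, dst) performs over every lease at or under src. The TreeMap and helper below are simplified stand-ins of my own for sortedLeasesByPath and findLeaseWithPrefixPath, not the real LeaseManager internals; the separator check simply keeps sibling names such as /test-d2 out of a rename of /test-d.

import java.util.Map;
import java.util.TreeMap;

public class LeasePathRewriteSketch {

  // Replace the src prefix of every lease path at or under src with dst.
  static void changeLease(TreeMap<String, String> leasesByPath, String src, String dst) {
    final int len = src.length();
    // Snapshot the affected entries first so the map can be updated safely.
    Map<String, String> moved = new TreeMap<String, String>();
    for (Map.Entry<String, String> e : leasesByPath.entrySet()) {
      String path = e.getKey();
      if (path.equals(src) || path.startsWith(src + "/")) {
        moved.put(path, e.getValue());
      }
    }
    for (Map.Entry<String, String> e : moved.entrySet()) {
      String oldPath = e.getKey();
      String newPath = dst + oldPath.substring(len);   // keep the subtree suffix
      leasesByPath.remove(oldPath);
      leasesByPath.put(newPath, e.getValue());
    }
  }

  public static void main(String[] args) {
    TreeMap<String, String> leases = new TreeMap<String, String>();
    leases.put("/test-d/test-file", "DFSClient_1");
    leases.put("/test-d/sub/other-file", "DFSClient_2");
    changeLease(leases, "/test-d", "/test-d-other");
    // Prints {/test-d-other/sub/other-file=DFSClient_2, /test-d-other/test-file=DFSClient_1}
    System.out.println(leases);
  }
}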

@@ -30,7 +30,9 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -49,6 +51,10 @@ public class TestLease {
).getLeaseByPath(src.toString()) != null;
}
static int leaseCount(MiniDFSCluster cluster) {
return NameNodeAdapter.getLeaseManager(cluster.getNamesystem()).countLease();
}
static final String dirString = "/test/lease";
final Path dir = new Path(dirString);
static final Log LOG = LogFactory.getLog(TestLease.class);
@@ -126,6 +132,96 @@ public class TestLease {
}
}
@Test
public void testLeaseAfterRename() throws Exception {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
Path p = new Path("/test-file");
Path d = new Path("/test-d");
Path d2 = new Path("/test-d-other");
// open a file to get a lease
FileSystem fs = cluster.getFileSystem();
FSDataOutputStream out = fs.create(p);
out.writeBytes("something");
//out.hsync();
Assert.assertTrue(hasLease(cluster, p));
Assert.assertEquals(1, leaseCount(cluster));
// just to ensure first fs doesn't have any logic to twiddle leases
DistributedFileSystem fs2 = (DistributedFileSystem) FileSystem.newInstance(fs.getUri(), fs.getConf());
// rename the file into an existing dir
LOG.info("DMS: rename file into dir");
Path pRenamed = new Path(d, p.getName());
fs2.mkdirs(d);
fs2.rename(p, pRenamed);
Assert.assertFalse(p+" exists", fs2.exists(p));
Assert.assertTrue(pRenamed+" not found", fs2.exists(pRenamed));
Assert.assertFalse("has lease for "+p, hasLease(cluster, p));
Assert.assertTrue("no lease for "+pRenamed, hasLease(cluster, pRenamed));
Assert.assertEquals(1, leaseCount(cluster));
// rename the parent dir to a new non-existent dir
LOG.info("DMS: rename parent dir");
Path pRenamedAgain = new Path(d2, pRenamed.getName());
fs2.rename(d, d2);
// src gone
Assert.assertFalse(d+" exists", fs2.exists(d));
Assert.assertFalse("has lease for "+pRenamed, hasLease(cluster, pRenamed));
// dst checks
Assert.assertTrue(d2+" not found", fs2.exists(d2));
Assert.assertTrue(pRenamedAgain+" not found", fs2.exists(pRenamedAgain));
Assert.assertTrue("no lease for "+pRenamedAgain, hasLease(cluster, pRenamedAgain));
Assert.assertEquals(1, leaseCount(cluster));
// rename the parent dir to existing dir
// NOTE: rename w/o options moves paths into existing dir
LOG.info("DMS: rename parent again");
pRenamed = pRenamedAgain;
pRenamedAgain = new Path(new Path(d, d2.getName()), p.getName());
fs2.mkdirs(d);
fs2.rename(d2, d);
// src gone
Assert.assertFalse(d2+" exists", fs2.exists(d2));
Assert.assertFalse("no lease for "+pRenamed, hasLease(cluster, pRenamed));
// dst checks
Assert.assertTrue(d+" not found", fs2.exists(d));
Assert.assertTrue(pRenamedAgain +" not found", fs2.exists(pRenamedAgain));
Assert.assertTrue("no lease for "+pRenamedAgain, hasLease(cluster, pRenamedAgain));
Assert.assertEquals(1, leaseCount(cluster));
// rename with opts to non-existent dir
pRenamed = pRenamedAgain;
pRenamedAgain = new Path(d2, p.getName());
fs2.rename(pRenamed.getParent(), d2, Options.Rename.OVERWRITE);
// src gone
Assert.assertFalse(pRenamed.getParent() +" exists", fs2.exists(pRenamed.getParent()));
Assert.assertFalse("has lease for "+pRenamed, hasLease(cluster, pRenamed));
// dst checks
Assert.assertTrue(d2+" not found", fs2.exists(d2));
Assert.assertTrue(pRenamedAgain+" not found", fs2.exists(pRenamedAgain));
Assert.assertTrue("no lease for "+pRenamedAgain, hasLease(cluster, pRenamedAgain));
Assert.assertEquals(1, leaseCount(cluster));
// rename with opts to existing dir
// NOTE: rename with options will not move paths into the existing dir
pRenamed = pRenamedAgain;
pRenamedAgain = new Path(d, p.getName());
fs2.rename(pRenamed.getParent(), d, Options.Rename.OVERWRITE);
// src gone
Assert.assertFalse(pRenamed.getParent() +" exists", fs2.exists(pRenamed.getParent()));
Assert.assertFalse("has lease for "+pRenamed, hasLease(cluster, pRenamed));
// dst checks
Assert.assertTrue(d+" not found", fs2.exists(d));
Assert.assertTrue(pRenamedAgain+" not found", fs2.exists(pRenamedAgain));
Assert.assertTrue("no lease for "+pRenamedAgain, hasLease(cluster, pRenamedAgain));
Assert.assertEquals(1, leaseCount(cluster));
} finally {
cluster.shutdown();
}
}
@Test
public void testLease() throws Exception {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();