diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 204df268192..0cace3f0286 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -416,6 +416,9 @@ Release 2.6.0 - UNRELEASED
HDFS-6954. With crypto, no native lib systems are too verbose. (clamb via wang)
+ HDFS-2975. Rename with overwrite flag true can make NameNode get stuck in safemode
+ on NN (crash + restart). (Yi Liu via umamahesh)
+
Release 2.5.1 - UNRELEASED
INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 04f81c1579c..ee9ed11e187 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -452,7 +452,7 @@ public class FSDirectory implements Closeable {
* @see #unprotectedRenameTo(String, String, long, Options.Rename...)
*/
void renameTo(String src, String dst, long mtime,
- Options.Rename... options)
+ BlocksMapUpdateInfo collectedBlocks, Options.Rename... options)
throws FileAlreadyExistsException, FileNotFoundException,
ParentNotDirectoryException, QuotaExceededException,
UnresolvedLinkException, IOException {
@@ -462,7 +462,7 @@ public class FSDirectory implements Closeable {
}
writeLock();
try {
- if (unprotectedRenameTo(src, dst, mtime, options)) {
+ if (unprotectedRenameTo(src, dst, mtime, collectedBlocks, options)) {
namesystem.incrDeletedFileCount(1);
}
} finally {
@@ -569,8 +569,9 @@ public class FSDirectory implements Closeable {
/**
* Rename src to dst.
- * See {@link DistributedFileSystem#rename(Path, Path, Options.Rename...)}
- * for details related to rename semantics and exceptions.
+ *
+ * Note: This is to be used by {@link FSEditLog} only.
+ *
*
* @param src source path
* @param dst destination path
@@ -578,9 +579,34 @@ public class FSDirectory implements Closeable {
* @param options Rename options
*/
boolean unprotectedRenameTo(String src, String dst, long timestamp,
- Options.Rename... options) throws FileAlreadyExistsException,
- FileNotFoundException, ParentNotDirectoryException,
+ Options.Rename... options) throws FileAlreadyExistsException,
+ FileNotFoundException, ParentNotDirectoryException,
QuotaExceededException, UnresolvedLinkException, IOException {
+ BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
+ boolean ret = unprotectedRenameTo(src, dst, timestamp,
+ collectedBlocks, options);
+ if (!collectedBlocks.getToDeleteList().isEmpty()) {
+ getFSNamesystem().removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
+ }
+ return ret;
+ }
+
+ /**
+ * Rename src to dst.
+ * See {@link DistributedFileSystem#rename(Path, Path, Options.Rename...)}
+ * for details related to rename semantics and exceptions.
+ *
+ * @param src source path
+ * @param dst destination path
+ * @param timestamp modification time
+ * @param collectedBlocks blocks to be removed
+ * @param options Rename options
+ */
+ boolean unprotectedRenameTo(String src, String dst, long timestamp,
+ BlocksMapUpdateInfo collectedBlocks, Options.Rename... options)
+ throws FileAlreadyExistsException, FileNotFoundException,
+ ParentNotDirectoryException, QuotaExceededException,
+ UnresolvedLinkException, IOException {
assert hasWriteLock();
boolean overwrite = options != null && Arrays.asList(options).contains
(Rename.OVERWRITE);
@@ -671,7 +697,6 @@ public class FSDirectory implements Closeable {
if (removedDst != null) {
undoRemoveDst = false;
if (removedNum > 0) {
- BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
List removedINodes = new ChunkedArrayList();
if (!removedDst.isInLatestSnapshot(dstIIP.getLatestSnapshotId())) {
removedDst.destroyAndCollectBlocks(collectedBlocks, removedINodes);
@@ -681,7 +706,7 @@ public class FSDirectory implements Closeable {
dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes,
true).get(Quota.NAMESPACE) >= 0;
}
- getFSNamesystem().removePathAndBlocks(src, collectedBlocks,
+ getFSNamesystem().removePathAndBlocks(src, null,
removedINodes, false);
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 076c9c8193e..b2a2edaa5a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3641,12 +3641,14 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
HdfsFileStatus resultingStat = null;
boolean success = false;
writeLock();
+ BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
try {
checkOperation(OperationCategory.WRITE);
checkNameNodeSafeMode("Cannot rename " + src);
src = resolvePath(src, srcComponents);
dst = resolvePath(dst, dstComponents);
- renameToInternal(pc, src, dst, cacheEntry != null, options);
+ renameToInternal(pc, src, dst, cacheEntry != null,
+ collectedBlocks, options);
resultingStat = getAuditFileInfo(dst, false);
success = true;
} finally {
@@ -3654,6 +3656,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
RetryCache.setState(cacheEntry, success);
}
getEditLog().logSync();
+ if (!collectedBlocks.getToDeleteList().isEmpty()) {
+ removeBlocks(collectedBlocks);
+ collectedBlocks.clear();
+ }
if (resultingStat != null) {
StringBuilder cmd = new StringBuilder("rename options=");
for (Rename option : options) {
@@ -3663,8 +3669,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
}
- private void renameToInternal(FSPermissionChecker pc, String src, String dst,
- boolean logRetryCache, Options.Rename... options) throws IOException {
+ private void renameToInternal(FSPermissionChecker pc, String src,
+ String dst, boolean logRetryCache, BlocksMapUpdateInfo collectedBlocks,
+ Options.Rename... options) throws IOException {
assert hasWriteLock();
if (isPermissionEnabled) {
// Rename does not operates on link targets
@@ -3679,7 +3686,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
waitForLoadingFSImage();
long mtime = now();
- dir.renameTo(src, dst, mtime, options);
+ dir.renameTo(src, dst, mtime, collectedBlocks, options);
getEditLog().logRename(src, dst, mtime, logRetryCache, options);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
index 2e748b5b1c2..e7002c301c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
@@ -131,6 +131,7 @@ public class TestDFSRename {
/**
* Check the blocks of dst file are cleaned after rename with overwrite
+ * Restart the NN to verify that the rename succeeded
*/
@Test(timeout = 120000)
public void testRenameWithOverwrite() throws Exception {
@@ -160,6 +161,11 @@ public class TestDFSRename {
dfs.rename(srcPath, dstPath, Rename.OVERWRITE);
assertTrue(bm.getStoredBlock(lbs.getLocatedBlocks().get(0).getBlock().
getLocalBlock()) == null);
+
+ // Restart the NN and verify that the rename succeeded
+ cluster.restartNameNodes();
+ assertFalse(dfs.exists(srcPath));
+ assertTrue(dfs.exists(dstPath));
} finally {
if (dfs != null) {
dfs.close();