From 4603e4481f0486afcce6b106d4a92a6e90e5b6d9 Mon Sep 17 00:00:00 2001
From: arp
Date: Tue, 16 Sep 2014 15:45:07 -0700
Subject: [PATCH] HDFS-7064. Fix unit test failures in HDFS-6581 branch.
 (Contributed by Xiaoyu Yao)

---
 .../org/apache/hadoop/fs/shell/CommandWithDestination.java    | 3 ++-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt         | 4 ++++
 .../hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java     | 5 +++--
 .../org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java  | 2 +-
 4 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
index 5d0d9d6975d..7152f02d3a6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
@@ -478,7 +478,8 @@ abstract class CommandWithDestination extends FsCommand {
         createFlags.add(LAZY_PERSIST);
       }
       return create(item.path,
-                    null,
+                    FsPermission.getFileDefault().applyUMask(
+                        FsPermission.getUMask(getConf())),
                     createFlags,
                     getConf().getInt("io.file.buffer.size", 4096),
                     lazyPersist ? 1 : getDefaultReplication(item.path),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
index 08776364640..f6c8672b1d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
@@ -47,3 +47,7 @@
     HDFS-7066. LazyWriter#evictBlocks misses a null check for replicaState.
     (Xiaoyu Yao via Arpit Agarwal)
 
+    HDFS-7064. Fix unit test failures in HDFS-6581 branch. (Xiaoyu Yao via
+    Arpit Agarwal)
+
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 276aa5b9466..149ca27a353 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -381,8 +381,9 @@ public class FsVolumeImpl implements FsVolumeSpi {
           !FileUtil.fullyDelete(finalizedDir)) {
         throw new IOException("Failed to delete " + finalizedDir);
       }
-      if (!DatanodeUtil.dirNoFilesRecursive(lazypersistDir) ||
-          !FileUtil.fullyDelete(lazypersistDir)) {
+      if (lazypersistDir.exists() &&
+          ((!DatanodeUtil.dirNoFilesRecursive(lazypersistDir) ||
+              !FileUtil.fullyDelete(lazypersistDir)))) {
         throw new IOException("Failed to delete " + lazypersistDir);
       }
       FileUtil.fullyDelete(tmpDir);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
index c0b4f9a8690..94af015c463 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
@@ -51,7 +51,7 @@ public class TestDataDirs {
     String locations1 = "[disk]/dir0,[DISK]/dir1,[sSd]/dir2,[disK]/dir3,[ram_disk]/dir4";
     conf.set(DFS_DATANODE_DATA_DIR_KEY, locations1);
     locations = DataNode.getStorageLocations(conf);
-    assertThat(locations.size(), is(4));
+    assertThat(locations.size(), is(5));
     assertThat(locations.get(0).getStorageType(), is(StorageType.DISK));
     assertThat(locations.get(0).getUri(), is(dir0.toURI()));
     assertThat(locations.get(1).getStorageType(), is(StorageType.DISK));