diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f796b0ecf54..c0405d396de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -771,6 +771,9 @@ Release 2.0.5-beta - UNRELEASED
     HDFS-4842. Identify the correct prior snapshot when deleting a snapshot
     under a renamed subtree. (jing9)
 
+    HDFS-4846. Clean up snapshot CLI commands output stacktrace for invalid
+    arguments. (Jing Zhao via brandonli)
+
 Release 2.0.4-alpha - 2013-04-25
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 9ec7c64155c..c945d5e5aa6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2152,11 +2152,7 @@ public class DFSClient implements java.io.Closeable {
    */
   public void allowSnapshot(String snapshotRoot) throws IOException {
     checkOpen();
-    try {
-      namenode.allowSnapshot(snapshotRoot);
-    } catch(RemoteException re) {
-      throw re.unwrapRemoteException();
-    }
+    namenode.allowSnapshot(snapshotRoot);
   }
 
   /**
@@ -2166,11 +2162,7 @@ public class DFSClient implements java.io.Closeable {
    */
   public void disallowSnapshot(String snapshotRoot) throws IOException {
     checkOpen();
-    try {
-      namenode.disallowSnapshot(snapshotRoot);
-    } catch(RemoteException re) {
-      throw re.unwrapRemoteException();
-    }
+    namenode.disallowSnapshot(snapshotRoot);
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index c019e40bffe..9fa302a1890 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -1433,7 +1433,7 @@ public class FSDirectory implements Closeable {
       INodeDirectorySnapshottable ssTargetDir =
           (INodeDirectorySnapshottable) targetDir;
       if (ssTargetDir.getNumSnapshots() > 0) {
-        throw new IOException("The direcotry " + ssTargetDir.getFullPathName()
+        throw new IOException("The directory " + ssTargetDir.getFullPathName()
             + " cannot be deleted since " + ssTargetDir.getFullPathName()
             + " is snapshottable and already has snapshots");
       } else {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/LsSnapshottableDir.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/LsSnapshottableDir.java
index c8ac8611a31..d7621794391 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/LsSnapshottableDir.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/LsSnapshottableDir.java
@@ -51,8 +51,13 @@ public class LsSnapshottableDir {
     }
     DistributedFileSystem dfs = (DistributedFileSystem) fs;
 
-    SnapshottableDirectoryStatus[] stats = dfs.getSnapshottableDirListing();
-    SnapshottableDirectoryStatus.print(stats, System.out);
+    try {
+      SnapshottableDirectoryStatus[] stats = dfs.getSnapshottableDirListing();
+      SnapshottableDirectoryStatus.print(stats, System.out);
+    } catch (IOException e) {
+      String[] content = e.getLocalizedMessage().split("\n");
+      System.err.println("lsSnapshottableDir: " + content[0]);
+    }
   }
 }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java
index 5a0b2038f3d..c130703cf1d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java
@@ -82,9 +82,14 @@ public class SnapshotDiff {
     Path snapshotRoot = new Path(argv[0]);
     String fromSnapshot = getSnapshotName(argv[1]);
     String toSnapshot = getSnapshotName(argv[2]);
-    SnapshotDiffReport diffReport = dfs.getSnapshotDiffReport(snapshotRoot,
-        fromSnapshot, toSnapshot);
-    System.out.println(diffReport.toString());
+    try {
+      SnapshotDiffReport diffReport = dfs.getSnapshotDiffReport(snapshotRoot,
+          fromSnapshot, toSnapshot);
+      System.out.println(diffReport.toString());
+    } catch (IOException e) {
+      String[] content = e.getLocalizedMessage().split("\n");
+      System.err.println("snapshotDiff: " + content[0]);
+    }
   }
 }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
index 0197608a6c1..a983098224d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
@@ -135,7 +135,7 @@ public class TestNestedSnapshots {
     try {
       hdfs.disallowSnapshot(rootPath);
       fail("Expect snapshot exception when disallowing snapshot on root again");
-    } catch (SnapshotException e) {
+    } catch (RemoteException e) {
       GenericTestUtils.assertExceptionContains(
           "Root is not a snapshottable directory", e);
     }
@@ -149,14 +149,16 @@ public class TestNestedSnapshots {
     try {
       hdfs.allowSnapshot(rootPath);
       Assert.fail();
-    } catch(SnapshotException se) {
-      assertNestedSnapshotException(se, "subdirectory");
+    } catch(RemoteException se) {
+      assertNestedSnapshotException(
+          (SnapshotException) se.unwrapRemoteException(), "subdirectory");
     }
     try {
       hdfs.allowSnapshot(foo);
       Assert.fail();
-    } catch(SnapshotException se) {
-      assertNestedSnapshotException(se, "subdirectory");
+    } catch(RemoteException se) {
+      assertNestedSnapshotException(
+          (SnapshotException) se.unwrapRemoteException(), "subdirectory");
     }
 
     final Path sub1Bar = new Path(bar, "sub1");
@@ -165,14 +167,16 @@
     try {
       hdfs.allowSnapshot(sub1Bar);
       Assert.fail();
-    } catch(SnapshotException se) {
-      assertNestedSnapshotException(se, "ancestor");
+    } catch(RemoteException se) {
+      assertNestedSnapshotException(
+          (SnapshotException) se.unwrapRemoteException(), "ancestor");
     }
     try {
       hdfs.allowSnapshot(sub2Bar);
       Assert.fail();
-    } catch(SnapshotException se) {
-      assertNestedSnapshotException(se, "ancestor");
+    } catch(RemoteException se) {
+      assertNestedSnapshotException(
+          (SnapshotException) se.unwrapRemoteException(), "ancestor");
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index d14dd88087f..e795673fc07 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -112,7 +112,7 @@ public class TestSnapshotDeletion {
 
     // Deleting a snapshottable dir with snapshots should fail
     exception.expect(RemoteException.class);
-    String error = "The direcotry " + sub.toString()
+    String error = "The directory " + sub.toString()
         + " cannot be deleted since " + sub.toString()
         + " is snapshottable and already has snapshots";
     exception.expectMessage(error);