HADOOP-18671. Add recoverLease(), setSafeMode(), isFileClosed() as interfaces to hadoop-common (#5553) (#5620)
* HADOOP-18671. Add recoverLease(), setSafeMode(), isFileClosed() as interfaces to hadoop-common (#5553)

The HDFS lease APIs have been replicated as interfaces in hadoop-common so other filesystems can also implement them. Applications which use the leasing APIs should migrate to the new interface where possible.

Contributed by Stephen Wu

(cherry picked from commit 0e46388474)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
	hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestViewDistributedFileSystem.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRetryCacheMetrics.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFSImageWithOrderedSnapshotDeletion.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOrderedSnapshotDeletion.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForErasureCodingPolicy.java

Change-Id: I2ccd0b6780a86610df61d8528e681db0451e2e4c
(cherry picked from commit 207972692a)

Conflicts:
	hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java
	hadoop-common-project/hadoop-common/src/site/markdown/filesystem/index.md
	hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
	hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ViewDistributedFileSystem.java
	hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
	hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcSingleNS.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestNNHealthCheck.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForStoragePolicy.java

Change-Id: Ieaa669eb43eb6c1b79b48ff862b438ae9611c08e
Co-authored-by: Tak Lon (Stephen) Wu <taklwu@apache.org>
This commit is contained in:
parent 822c7eee0b
commit ffbf806846
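The migration the commit message calls for amounts to swapping the HDFS-specific enum for the new hadoop-common types. A minimal caller-side sketch (hypothetical application code, not part of this diff):

```java
import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.SafeMode;
import org.apache.hadoop.fs.SafeModeAction;

class SafeModeMigrationSketch {
  // Before: callers downcast to DistributedFileSystem and used
  // HdfsConstants.SafeModeAction.SAFEMODE_ENTER / SAFEMODE_LEAVE.
  // After: any FileSystem implementing SafeMode works.
  static void toggle(FileSystem fs) throws IOException {
    if (fs instanceof SafeMode) {
      SafeMode sm = (SafeMode) fs;
      sm.setSafeMode(SafeModeAction.ENTER); // returns true: safe mode is on
      sm.setSafeMode(SafeModeAction.LEAVE); // returns false: safe mode is off
    }
  }
}
```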
@@ -123,4 +123,11 @@ public final class CommonPathCapabilities {
    */
   public static final String FS_XATTRS = "fs.capability.paths.xattrs";
 
+  /**
+   * Does this Filesystem support lease recovery operations such as
+   * {@link LeaseRecoverable#recoverLease(Path)} and {@link LeaseRecoverable#isFileClosed(Path)}?
+   * Value: {@value}.
+   */
+  public static final String LEASE_RECOVERABLE = "fs.capability.lease.recoverable";
+
 }
@@ -0,0 +1,46 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs;

import java.io.IOException;

/**
 * Whether the given path of the filesystem supports lease recovery.
 */
public interface LeaseRecoverable {

  /**
   * Start the lease recovery of a file.
   *
   * @param file path to a file.
   * @return true if the file is already closed and does not require lease recovery.
   * @throws IOException if an error occurs during lease recovery.
   * @throws UnsupportedOperationException if lease recovery is not supported by this filesystem.
   */
  boolean recoverLease(Path file) throws IOException;

  /**
   * Get the close status of a file.
   * @param file the path to the file.
   * @return true if the file is closed.
   * @throws IOException if an I/O error occurred.
   * @throws UnsupportedOperationException if isFileClosed is not supported by this filesystem.
   */
  boolean isFileClosed(Path file) throws IOException;
}
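As a usage illustration (hypothetical caller code, not part of this commit), lease recovery is typically polled until the file reports closed:

```java
import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LeaseRecoverable;
import org.apache.hadoop.fs.Path;

final class LeaseRecoverySketch {
  /** Ask the filesystem to recover the lease until the file is closed. */
  static void recoverUntilClosed(FileSystem fs, Path file)
      throws IOException, InterruptedException {
    if (!(fs instanceof LeaseRecoverable)) {
      return; // this filesystem does not support lease recovery
    }
    LeaseRecoverable lr = (LeaseRecoverable) fs;
    // recoverLease() returns true once the file is closed and needs no
    // further recovery; otherwise recovery proceeds asynchronously.
    while (!lr.recoverLease(file)) {
      Thread.sleep(1000L); // arbitrary polling interval for this sketch
    }
  }
}
```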
@@ -0,0 +1,50 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs;

import java.io.IOException;

/**
 * Interface for filesystems that support safe mode: entering, leaving,
 * and querying the safe mode status.
 */
public interface SafeMode {

  /**
   * Enter, leave, or get safe mode.
   *
   * @param action one of {@link SafeModeAction} LEAVE, ENTER, GET, FORCE_EXIT.
   * @throws IOException if setting safe mode fails.
   * @return true if the action was accepted; false if it was rejected.
   */
  default boolean setSafeMode(SafeModeAction action) throws IOException {
    return setSafeMode(action, false);
  }

  /**
   * Enter, leave, or get safe mode.
   *
   * @param action one of {@link SafeModeAction} LEAVE, ENTER, GET, FORCE_EXIT.
   * @param isChecked if true, check only the active metadata node / NameNode's status;
   *                  otherwise check the first metadata node / NameNode's status.
   * @throws IOException if setting safe mode fails.
   * @return true if the action was accepted; false if it was rejected.
   */
  boolean setSafeMode(SafeModeAction action, boolean isChecked) throws IOException;

}
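A small caller-side sketch of the return-value semantics (hypothetical code; the behaviour follows the javadoc above):

```java
import java.io.IOException;

import org.apache.hadoop.fs.SafeMode;
import org.apache.hadoop.fs.SafeModeAction;

final class SafeModeProbe {
  /** Returns whether the filesystem is currently in safe mode. */
  static boolean isInSafeMode(SafeMode fs) throws IOException {
    // GET does not change state; the returned boolean is the current status.
    // isChecked=true restricts the probe to the active metadata node
    // (the active NameNode, for HDFS).
    return fs.setSafeMode(SafeModeAction.GET, true);
  }
}
```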
@@ -0,0 +1,41 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs;

/**
 * An identical copy of org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction,
 * provided so that other filesystem implementations can define {@link SafeMode}.
 */
public enum SafeModeAction {
  /**
   * Start entering safe mode.
   */
  ENTER,
  /**
   * Gracefully exit safe mode.
   */
  LEAVE,
  /**
   * Force an exit from safe mode.
   */
  FORCE_EXIT,
  /**
   * Get the status of safe mode.
   */
  GET;
}
@@ -38,3 +38,7 @@ HDFS as these are commonly expected by Hadoop client applications.
 2. [Testing with the Filesystem specification](testing.html)
 2. [Extending the specification and its tests](extending.html)
 1. [Uploading a file using Multiple Parts](multipartuploader.html)
 1. [openFile()](openfile.html)
 1. [SafeMode](safemode.html)
 1. [LeaseRecoverable](leaserecoverable.html)
@ -0,0 +1,52 @@
|
|||
<!---
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. See accompanying LICENSE file.
|
||||
-->
|
||||
|
||||
# <a name="LeaseRecoverable"></a> interface `LeaseRecoverable`
|
||||
|
||||
The `LeaseRecoverable` interface tells whether a given path of current filesystem can perform lease
|
||||
recovery for open file that a lease is not explicitly renewed or the client holding it goes away.
|
||||
|
||||
This interface should be implemented accordingly when necessary to any Filesystem that supports
|
||||
lease recovery, e.g. `DistributedFileSystem` (HDFS) and `ViewDistributedFileSystem`.
|
||||
|
||||
```java
|
||||
public interface LeaseRecoverable {
|
||||
boolean recoverLease(Path file) throws IOException;
|
||||
boolean isFileClosed(Path file) throws IOException;
|
||||
}
|
||||
```
|
||||
|
||||
There are two main functions of this interface, one performs lease recovery and another one
|
||||
verifies if a file has been closed.
|
||||
|
||||
### boolean recoverLease(Path file)
|
||||
|
||||
This function performs the lease recovery for the given file path, and it does not support
|
||||
directory path recovery.
|
||||
1. Return `true`, if the file has already closed, or does not require lease recovery.
|
||||
1. Return `false`, if the lease recovery is yet completed.
|
||||
1. Throw `IOException` if a directory path is given as input.
|
||||
|
||||
### boolean isFileClosed(Path file)
|
||||
|
||||
This function only checks if the give file path has been closed, and it does not support directory
|
||||
verification.
|
||||
1. Return `true`, if the file has been closed.
|
||||
1. Return `false`, if the file is still open.
|
||||
1. Throw `IOException` if a directory path is given as input.
|
||||
|
||||
### Path Capabilities SHOULD BE declared
|
||||
|
||||
If a filesystem supports `LeaseRecoverable`, it should return `true` to
|
||||
`PathCapabilities.hasPathCapability(path, "fs.capability.lease.recoverable")` for a given path.
|
|
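For implementers, declaring the capability is a one-case addition to `hasPathCapability()`; a sketch for a hypothetical `FileSystem` subclass (`DistributedFileSystem` is wired up the same way later in this diff):

```java
@Override
public boolean hasPathCapability(Path path, String capability) throws IOException {
  switch (validatePathCapabilityArgs(makeQualified(path), capability)) {
  case CommonPathCapabilities.LEASE_RECOVERABLE:
    return true; // every path on this filesystem supports lease recovery
  default:
    return super.hasPathCapability(path, capability);
  }
}
```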
@ -0,0 +1,45 @@
|
|||
<!---
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. See accompanying LICENSE file.
|
||||
-->
|
||||
|
||||
# <a name="SafeMode"></a> interface `SafeMode`
|
||||
|
||||
The `SafeMode` interface provides a way to perform safe mode actions and obtain the
|
||||
status after such actions performed to the `FileSystem`.
|
||||
|
||||
This is admin only interface, should be implemented accordingly when necessary to
|
||||
Filesystem that support safe mode, e.g. `DistributedFileSystem` (HDFS) and
|
||||
`ViewDistributedFileSystem`.
|
||||
|
||||
```java
|
||||
public interface SafeMode {
|
||||
default boolean setSafeMode(SafeModeAction action) throws IOException {
|
||||
return setSafeMode(action, false);
|
||||
}
|
||||
boolean setSafeMode(SafeModeAction action, boolean isChecked) throws IOException;
|
||||
}
|
||||
```
|
||||
|
||||
The goals of this interface is allow any file system implementation to share the
|
||||
same concept of safe mode with the following actions and states
|
||||
|
||||
### Safe mode actions
|
||||
1. `GET`, get the safe mode status of the file system.
|
||||
1. `ENTER`, enter the safe mode for the file system.
|
||||
1. `LEAVE`, exit safe mode for the file system gracefully.
|
||||
1. `FORCE_EXIT`, exit safe mode for the file system even if there is any ongoing data process.
|
||||
|
||||
### Safe mode states
|
||||
1. return true, when safe mode is on.
|
||||
1. return false, when safe mode is off, usually it's the result of safe mode actions
|
||||
with `GET`, `LEAVE`, `FORCE_EXIT`.
|
|
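A common admin sequence built from these actions, mirroring what the HDFS tests later in this diff do around `saveNamespace()` (sketch; `fs` is assumed to be a `DistributedFileSystem`):

```java
SafeMode sm = (SafeMode) fs;          // fs implements SafeMode
sm.setSafeMode(SafeModeAction.ENTER); // returns true: safe mode is now on
fs.saveNamespace();                   // HDFS-specific namespace checkpoint
sm.setSafeMode(SafeModeAction.LEAVE); // returns false: safe mode is now off
```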
@ -0,0 +1,90 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.fs.contract;
|
||||
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
|
||||
import org.assertj.core.api.Assertions;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.LeaseRecoverable;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.test.LambdaTestUtils;
|
||||
|
||||
import static org.apache.hadoop.fs.CommonPathCapabilities.LEASE_RECOVERABLE;
|
||||
|
||||
public abstract class AbstractContractLeaseRecoveryTest extends
|
||||
AbstractFSContractTestBase {
|
||||
|
||||
@Test
|
||||
public void testLeaseRecovery() throws Throwable {
|
||||
final Path path = methodPath();
|
||||
final FileSystem fs = getFileSystem();
|
||||
ContractTestUtils.touch(fs, path);
|
||||
LeaseRecoverable leaseRecoverableFs = verifyAndGetLeaseRecoverableInstance(fs, path);
|
||||
|
||||
Assertions.assertThat(leaseRecoverableFs.recoverLease(path))
|
||||
.describedAs("Issuing lease recovery on a closed file must be successful")
|
||||
.isTrue();
|
||||
|
||||
Assertions.assertThat(leaseRecoverableFs.isFileClosed(path))
|
||||
.describedAs("Get the isFileClose status on a closed file must be successful")
|
||||
.isTrue();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testLeaseRecoveryFileNotExist() throws Throwable {
|
||||
final Path path = new Path("notExist");
|
||||
final FileSystem fs = getFileSystem();
|
||||
LeaseRecoverable leaseRecoverableFs = verifyAndGetLeaseRecoverableInstance(fs, path);
|
||||
|
||||
LambdaTestUtils.intercept(FileNotFoundException.class, "File does not exist",
|
||||
() -> leaseRecoverableFs.recoverLease(path));
|
||||
|
||||
LambdaTestUtils.intercept(FileNotFoundException.class, "File does not exist",
|
||||
() -> leaseRecoverableFs.isFileClosed(path));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testLeaseRecoveryFileOnDirectory() throws Throwable {
|
||||
final Path path = methodPath();
|
||||
final FileSystem fs = getFileSystem();
|
||||
LeaseRecoverable leaseRecoverableFs = verifyAndGetLeaseRecoverableInstance(fs, path);
|
||||
final Path parentDirectory = path.getParent();
|
||||
|
||||
LambdaTestUtils.intercept(FileNotFoundException.class, "Path is not a file",
|
||||
() -> leaseRecoverableFs.recoverLease(parentDirectory));
|
||||
|
||||
LambdaTestUtils.intercept(FileNotFoundException.class, "Path is not a file",
|
||||
() -> leaseRecoverableFs.isFileClosed(parentDirectory));
|
||||
}
|
||||
|
||||
private LeaseRecoverable verifyAndGetLeaseRecoverableInstance(FileSystem fs, Path path)
|
||||
throws IOException {
|
||||
Assertions.assertThat(fs.hasPathCapability(path, LEASE_RECOVERABLE))
|
||||
.describedAs("path capability %s of %s", LEASE_RECOVERABLE, path)
|
||||
.isTrue();
|
||||
Assertions.assertThat(fs)
|
||||
.describedAs("filesystem %s", fs)
|
||||
.isInstanceOf(LeaseRecoverable.class);
|
||||
return (LeaseRecoverable) fs;
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,54 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.contract;

import org.assertj.core.api.Assertions;
import org.junit.Test;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.SafeMode;
import org.apache.hadoop.fs.SafeModeAction;

public abstract class AbstractContractSafeModeTest extends AbstractFSContractTestBase {

  @Test
  public void testSafeMode() throws Throwable {
    final FileSystem fs = getFileSystem();
    SafeMode fsWithSafeMode = verifyAndGetSafeModeInstance(fs);
    Assertions.assertThat(fsWithSafeMode.setSafeMode(SafeModeAction.GET))
        .describedAs("Safe mode should be off before entering.").isFalse();
    Assertions.assertThat(fsWithSafeMode.setSafeMode(SafeModeAction.ENTER))
        .describedAs("Entering safe mode should turn safe mode on.").isTrue();
    Assertions.assertThat(fsWithSafeMode.setSafeMode(SafeModeAction.GET))
        .describedAs("Safe mode should be on after entering.")
        .isTrue();
    Assertions.assertThat(fsWithSafeMode.setSafeMode(SafeModeAction.LEAVE))
        .describedAs("Leaving safe mode should switch safe mode off.").isFalse();
    Assertions.assertThat(fsWithSafeMode.setSafeMode(SafeModeAction.FORCE_EXIT))
        .describedAs("Force exit may be issued at any time and should switch safe mode off.")
        .isFalse();
  }

  private SafeMode verifyAndGetSafeModeInstance(FileSystem fs) {
    Assertions.assertThat(fs)
        .describedAs("File system %s must be an instance of %s", fs, SafeMode.class)
        .isInstanceOf(SafeMode.class);
    return (SafeMode) fs;
  }
}
@@ -225,6 +225,15 @@ public abstract class AbstractFSContractTestBase extends Assert
         new Path(getContract().getTestPath(), filepath));
   }
 
+  /**
+   * Get a path whose name ends with the name of this method.
+   * @return a path implicitly unique amongst all methods in this class
+   * @throws IOException IO problems
+   */
+  protected Path methodPath() throws IOException {
+    return path(methodName.getMethodName());
+  }
+
   /**
    * Take a simple path like "/something" and turn it into
    * a qualified path against the test FS.
@@ -21,7 +21,11 @@ package org.apache.hadoop.hdfs;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.fs.CommonPathCapabilities;
+import org.apache.commons.collections.list.TreeList;
+import org.apache.hadoop.fs.LeaseRecoverable;
+import org.apache.hadoop.fs.SafeMode;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -57,6 +61,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.StorageStatistics;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.UnresolvedLinkException;
@@ -88,7 +93,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsPathHandle;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
@@ -122,6 +126,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Optional;
 
+import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
+
 /****************************************************************
  * Implementation of the abstract FileSystem for the DFS system.
  * This object is the way end-user code interacts with a Hadoop
@@ -131,7 +137,7 @@ import java.util.Optional;
 @InterfaceAudience.LimitedPrivate({ "MapReduce", "HBase" })
 @InterfaceStability.Unstable
 public class DistributedFileSystem extends FileSystem
-    implements KeyProviderTokenIssuer {
+    implements KeyProviderTokenIssuer, LeaseRecoverable, SafeMode {
   private Path workingDir;
   private URI uri;
 
@@ -289,6 +295,7 @@ public class DistributedFileSystem extends FileSystem
    * @return true if the file is already closed
    * @throws IOException if an error occurs
    */
+  @Override
   public boolean recoverLease(final Path f) throws IOException {
     Path absF = fixRelativePart(f);
     return new FileSystemLinkResolver<Boolean>() {
@@ -1473,6 +1480,63 @@ public class DistributedFileSystem extends FileSystem
    * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode(
    *          HdfsConstants.SafeModeAction,boolean)
    */
+  @Override
+  public boolean setSafeMode(SafeModeAction action)
+      throws IOException {
+    return setSafeMode(action, false);
+  }
+
+  /**
+   * Enter, leave or get safe mode.
+   *
+   * @param action
+   *          One of SafeModeAction.ENTER, SafeModeAction.LEAVE and
+   *          SafeModeAction.GET.
+   * @param isChecked
+   *          If true check only for Active NNs status, else check first NN's
+   *          status.
+   */
+  @Override
+  @SuppressWarnings("deprecation")
+  public boolean setSafeMode(SafeModeAction action, boolean isChecked)
+      throws IOException {
+    return this.setSafeMode(convertToClientProtocolSafeModeAction(action), isChecked);
+  }
+
+  /**
+   * Translating the {@link SafeModeAction} into {@link HdfsConstants.SafeModeAction}
+   * that is used by {@link DFSClient#setSafeMode(HdfsConstants.SafeModeAction, boolean)}.
+   *
+   * @param action any supported action listed in {@link SafeModeAction}.
+   * @return the converted {@link HdfsConstants.SafeModeAction}.
+   * @throws UnsupportedOperationException if the provided {@link SafeModeAction} cannot be
+   *           translated.
+   */
+  private static HdfsConstants.SafeModeAction convertToClientProtocolSafeModeAction(
+      SafeModeAction action) {
+    switch (action) {
+    case ENTER:
+      return HdfsConstants.SafeModeAction.SAFEMODE_ENTER;
+    case LEAVE:
+      return HdfsConstants.SafeModeAction.SAFEMODE_LEAVE;
+    case FORCE_EXIT:
+      return HdfsConstants.SafeModeAction.SAFEMODE_FORCE_EXIT;
+    case GET:
+      return HdfsConstants.SafeModeAction.SAFEMODE_GET;
+    default:
+      throw new UnsupportedOperationException("Unsupported safe mode action " + action);
+    }
+  }
+
+  /**
+   * Enter, leave or get safe mode.
+   *
+   * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction,
+   * boolean)
+   *
+   * @deprecated please instead use {@link #setSafeMode(SafeModeAction)}.
+   */
+  @Deprecated
   public boolean setSafeMode(HdfsConstants.SafeModeAction action)
       throws IOException {
     return setSafeMode(action, false);
@@ -1483,12 +1547,18 @@ public class DistributedFileSystem extends FileSystem
    *
    * @param action
    *          One of SafeModeAction.ENTER, SafeModeAction.LEAVE and
-   *          SafeModeAction.GET
+   *          SafeModeAction.GET.
    * @param isChecked
    *          If true check only for Active NNs status, else check first NN's
-   *          status
-   * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode(SafeModeAction, boolean)
+   *          status.
+   *
+   * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction,
+   * boolean)
+   *
+   * @deprecated please instead use
+   * {@link DistributedFileSystem#setSafeMode(SafeModeAction, boolean)}.
    */
+  @Deprecated
   public boolean setSafeMode(HdfsConstants.SafeModeAction action,
       boolean isChecked) throws IOException {
     return dfs.setSafeMode(action, isChecked);
@@ -1525,7 +1595,7 @@ public class DistributedFileSystem extends FileSystem
   }
 
   /**
-   * enable/disable/check restoreFaileStorage
+   * enable/disable/check restoreFaileStorage.
    *
    * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#restoreFailedStorage(String arg)
    */
@@ -1888,7 +1958,7 @@ public class DistributedFileSystem extends FileSystem
    * when there is an issue communicating with the NameNode
    */
   public boolean isInSafeMode() throws IOException {
-    return setSafeMode(SafeModeAction.SAFEMODE_GET, true);
+    return setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET, true);
   }
 
   /** @see org.apache.hadoop.hdfs.client.HdfsAdmin#allowSnapshot(Path) */
@@ -2217,6 +2287,7 @@ public class DistributedFileSystem extends FileSystem
    * @throws FileNotFoundException if the file does not exist.
    * @throws IOException If an I/O error occurred
    */
+  @Override
   public boolean isFileClosed(final Path src) throws IOException {
     Path absF = fixRelativePart(src);
     return new FileSystemLinkResolver<Boolean>() {
@@ -3440,6 +3511,15 @@ public class DistributedFileSystem extends FileSystem
     if (cap.isPresent()) {
       return cap.get();
     }
+    // this switch is for features which are in the DFS client but not
+    // (yet/ever) in the WebHDFS API.
+    switch (validatePathCapabilityArgs(path, capability)) {
+    case CommonPathCapabilities.LEASE_RECOVERABLE:
+      return true;
+    default:
+      // fall through
+    }
+
     return super.hasPathCapability(p, capability);
   }
 }
@@ -47,6 +47,7 @@ public class HdfsUtils {
    * @param uri the HDFS URI.  Note that the URI path is ignored.
    * @return true if HDFS is healthy; false, otherwise.
    */
+  @SuppressWarnings("deprecation")
   public static boolean isHealthy(URI uri) {
     //check scheme
     final String scheme = uri.getScheme();
@@ -72,8 +72,8 @@ import org.apache.hadoop.hdfs.protocol.ECBlockGroupStats;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -1011,27 +1011,27 @@ public class TestRouterRpc {
   @Test
   public void testProxySetSafemode() throws Exception {
     boolean routerSafemode =
-        routerProtocol.setSafeMode(SafeModeAction.SAFEMODE_GET, false);
+        routerProtocol.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET, false);
     boolean nnSafemode =
-        nnProtocol.setSafeMode(SafeModeAction.SAFEMODE_GET, false);
+        nnProtocol.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET, false);
     assertEquals(nnSafemode, routerSafemode);
 
     routerSafemode =
-        routerProtocol.setSafeMode(SafeModeAction.SAFEMODE_GET, true);
+        routerProtocol.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET, true);
     nnSafemode =
-        nnProtocol.setSafeMode(SafeModeAction.SAFEMODE_GET, true);
+        nnProtocol.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET, true);
     assertEquals(nnSafemode, routerSafemode);
 
     assertFalse(routerProtocol.setSafeMode(
-        SafeModeAction.SAFEMODE_GET, false));
+        HdfsConstants.SafeModeAction.SAFEMODE_GET, false));
     assertTrue(routerProtocol.setSafeMode(
-        SafeModeAction.SAFEMODE_ENTER, false));
+        HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false));
     assertTrue(routerProtocol.setSafeMode(
-        SafeModeAction.SAFEMODE_GET, false));
+        HdfsConstants.SafeModeAction.SAFEMODE_GET, false));
     assertFalse(routerProtocol.setSafeMode(
-        SafeModeAction.SAFEMODE_LEAVE, false));
+        HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false));
     assertFalse(routerProtocol.setSafeMode(
-        SafeModeAction.SAFEMODE_GET, false));
+        HdfsConstants.SafeModeAction.SAFEMODE_GET, false));
   }
 
   @Test
@@ -488,6 +488,7 @@ public class DFSAdmin extends FsShell {
    * Gives a report on how the FileSystem is doing.
    * @exception IOException if the filesystem does not exist.
    */
+  @SuppressWarnings("deprecation")
   public void report(String[] argv, int i) throws IOException {
     DistributedFileSystem dfs = getDFS();
     FsStatus ds = dfs.getStatus();
@@ -634,6 +635,7 @@ public class DFSAdmin extends FsShell {
    * @param idx The index of the command that is being processed.
    * @exception IOException if the filesystem does not exist.
    */
+  @SuppressWarnings("deprecation")
   public void setSafeMode(String[] argv, int idx) throws IOException {
     if (idx != argv.length - 1) {
       printUsage("-safemode");
@@ -687,6 +689,7 @@ public class DFSAdmin extends FsShell {
 
   }
 
+  @SuppressWarnings("deprecation")
   private boolean waitExitSafeMode(DistributedFileSystem dfs, boolean inSafeMode)
       throws IOException {
     while (inSafeMode) {
@@ -0,0 +1,49 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.contract.hdfs;

import java.io.IOException;

import org.junit.AfterClass;
import org.junit.BeforeClass;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractLeaseRecoveryTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;

/**
 * Test lease recovery on HDFS.
 */
public class TestHDFSContractLeaseRecovery extends AbstractContractLeaseRecoveryTest {

  @BeforeClass
  public static void createCluster() throws IOException {
    HDFSContract.createCluster();
  }

  @AfterClass
  public static void teardownCluster() throws IOException {
    HDFSContract.destroyCluster();
  }

  @Override
  protected AbstractFSContract createContract(Configuration conf) {
    return new HDFSContract(conf);
  }
}
@@ -0,0 +1,49 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.contract.hdfs;

import java.io.IOException;

import org.junit.AfterClass;
import org.junit.BeforeClass;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractSafeModeTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;

/**
 * Test safe mode interface on HDFS.
 */
public class TestHDFSContractSafeMode extends AbstractContractSafeModeTest {

  @BeforeClass
  public static void createCluster() throws IOException {
    HDFSContract.createCluster();
  }

  @AfterClass
  public static void teardownCluster() throws IOException {
    HDFSContract.destroyCluster();
  }

  @Override
  protected AbstractFSContract createContract(Configuration conf) {
    return new HDFSContract(conf);
  }
}
@@ -32,9 +32,9 @@ import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.*;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.*;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -972,9 +972,9 @@ public class TestBlockStoragePolicy {
     checkDirectoryListing(barList, WARM, HOT);
 
     // restart namenode with checkpoint to make sure the fsimage is correct
-    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     fs.saveNamespace();
-    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    fs.setSafeMode(SafeModeAction.LEAVE);
     cluster.restartNameNode(true);
     dirList = fs.getClient().listPaths(dir.toString(),
         HdfsFileStatus.EMPTY_NAME).getPartialListing();
@@ -63,6 +63,7 @@ import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsUtils;
@@ -72,7 +73,6 @@ import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -1127,7 +1127,7 @@ public class TestDFSClientRetries {
 
           //enter safe mode
           assertTrue(HdfsUtils.isHealthy(uri));
-          dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+          dfs.setSafeMode(SafeModeAction.ENTER);
           assertFalse(HdfsUtils.isHealthy(uri));
 
           //leave safe mode in a new thread
@@ -1138,7 +1138,7 @@ public class TestDFSClientRetries {
             //sleep and then leave safe mode
             TimeUnit.SECONDS.sleep(30);
             assertFalse(HdfsUtils.isHealthy(uri));
-            dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+            dfs.setSafeMode(SafeModeAction.LEAVE);
             assertTrue(HdfsUtils.isHealthy(uri));
           } catch (Exception e) {
             exceptions.add(e);
@@ -34,8 +34,8 @@ import java.util.regex.Pattern;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@@ -239,7 +239,7 @@ public class TestDFSUpgrade {
     // make sure that rolling upgrade cannot be started
     try {
       final DistributedFileSystem dfs = cluster.getFileSystem();
-      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      dfs.setSafeMode(SafeModeAction.ENTER);
       dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
       fail();
     } catch(RemoteException re) {
@@ -383,7 +383,7 @@ public class TestDFSUpgrade {
     // make sure that rolling upgrade cannot be started
     try {
       final DistributedFileSystem dfs = cluster.getFileSystem();
-      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      dfs.setSafeMode(SafeModeAction.ENTER);
       dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
       fail();
     } catch(RemoteException re) {
@@ -69,13 +69,13 @@ import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
@@ -571,9 +571,9 @@ public class TestEncryptionZones {
       assertZonePresent(null, zonePath.toString());
     }
 
-    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     fs.saveNamespace();
-    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    fs.setSafeMode(SafeModeAction.LEAVE);
     cluster.restartNameNode(true);
     assertNumZones(numZones);
     assertEquals("Unexpected number of encryption zones!", numZones, cluster
@@ -606,9 +606,9 @@ public class TestEncryptionZones {
 
     // Verify rootDir ez is present after restarting the NameNode
    // and saving/loading from fsimage.
-    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     fs.saveNamespace();
-    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    fs.setSafeMode(SafeModeAction.LEAVE);
     cluster.restartNameNode(true);
     assertNumZones(numZones);
     assertZonePresent(null, rootDir.toString());
@@ -1201,9 +1201,9 @@ public class TestEncryptionZones {
         fs.getSnapshotDiffReport(snapshottable, "snap1", "");
     System.out.println(report);
     Assert.assertEquals(0, report.getDiffList().size());
-    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     fs.saveNamespace();
-    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    fs.setSafeMode(SafeModeAction.LEAVE);
     cluster.restartNameNode(true);
     report =
         fs.getSnapshotDiffReport(snapshottable, "snap1", "");
@@ -1718,7 +1718,7 @@ public class TestEncryptionZones {
     fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
     dfsAdmin.createEncryptionZone(zone1, TEST_KEY, NO_TRASH);
     DFSTestUtil.createFile(fs, zone1File, len, (short) 1, 0xFEED);
-    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
+    fs.setSafeMode(SafeModeAction.ENTER, false);
     fs.saveNamespace();
 
     File originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
@@ -23,12 +23,12 @@ import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -236,9 +236,9 @@ public class TestErasureCodingPolicies {
         fs.getErasureCodingPolicy(disabledPolicy));
 
     // Also check loading disabled EC policies from fsimage
-    fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     fs.saveNamespace();
-    fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+    fs.setSafeMode(SafeModeAction.LEAVE);
     cluster.restartNameNodes();
 
     Assert.assertEquals("Dir does not have policy set",
@@ -25,10 +25,10 @@ import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.After;
@@ -168,9 +168,9 @@ public class TestErasureCodingPolicyWithSnapshot {
         ecSnap);
 
     // save namespace, restart namenode, and check ec policy correctness.
-    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     fs.saveNamespace();
-    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    fs.setSafeMode(SafeModeAction.LEAVE);
     cluster.restartNameNode(true);
 
     ErasureCodingPolicy ecSnap1 = fs.getErasureCodingPolicy(snap1);
@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -41,7 +42,6 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -286,13 +286,13 @@ public class TestGetBlocks {
       assertEquals(blkLocsSize, locs.length);
       assertFalse(fs.isInSafeMode());
       LOG.info("Entering safe mode");
-      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      fs.setSafeMode(SafeModeAction.ENTER);
       LOG.info("Entered safe mode");
       assertTrue(fs.isInSafeMode());
       getBlocksWithException(namenode, info, replicationFactor, 0,
           RemoteException.class,
           "Cannot execute getBlocks. Name node is in safe mode.");
-      fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+      fs.setSafeMode(SafeModeAction.LEAVE);
       assertFalse(fs.isInSafeMode());
     } finally {
       if (fs != null) {
@@ -24,13 +24,18 @@ import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
+import org.apache.hadoop.fs.LeaseRecoverable;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeMode;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+
+import static org.apache.hadoop.fs.CommonPathCapabilities.LEASE_RECOVERABLE;
+import static org.assertj.core.api.Assertions.assertThat;
 
 public class TestHDFSFileSystemContract extends FileSystemContractBaseTest {
 
   private MiniDFSCluster cluster;
@@ -67,4 +72,17 @@ public class TestHDFSFileSystemContract extends FileSystemContractBaseTest {
   public void testAppend() throws IOException {
     AppendTestUtil.testAppend(fs, new Path("/testAppend/f"));
   }
+
+  @Test
+  public void testFileSystemCapabilities() throws Exception {
+    final Path p = new Path("testFileSystemCapabilities");
+    // ViewFileSystem does not support LeaseRecoverable and SafeMode.
+    if (fs instanceof DistributedFileSystem) {
+      final boolean leaseRecovery = fs.hasPathCapability(p, LEASE_RECOVERABLE);
+      assertThat(leaseRecovery).describedAs("path capabilities %s=%s in %s", LEASE_RECOVERABLE,
+          leaseRecovery, fs).isTrue();
+      assertThat(fs).describedAs("filesystem %s", fs).isInstanceOf(LeaseRecoverable.class);
+      assertThat(fs).describedAs("filesystem %s", fs).isInstanceOf(SafeMode.class);
+    }
+  }
 }
@@ -32,9 +32,9 @@ import java.util.concurrent.TimeoutException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
@@ -211,7 +211,7 @@ public class TestMiniDFSCluster {
         new MiniDFSCluster.Builder(conf, testDataCluster4).build();
     try {
       DistributedFileSystem dfs = cluster4.getFileSystem();
-      dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+      dfs.setSafeMode(SafeModeAction.ENTER);
       cluster4.shutdown();
     } finally {
       while(cluster4.isClusterUp()){
@@ -40,9 +40,9 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
 import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
@@ -109,9 +109,9 @@ public class TestRollingUpgrade {
       runCmd(dfsadmin, true, "-rollingUpgrade");
 
       //start rolling upgrade
-      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      dfs.setSafeMode(SafeModeAction.ENTER);
       runCmd(dfsadmin, true, "-rollingUpgrade", "prepare");
-      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+      dfs.setSafeMode(SafeModeAction.LEAVE);
 
       //query rolling upgrade
       runCmd(dfsadmin, true, "-rollingUpgrade", "query");
@@ -136,9 +136,9 @@ public class TestRollingUpgrade {
         Assert.assertTrue(dfs.exists(bar));
         Assert.assertTrue(dfs.exists(baz));
 
-        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+        dfs.setSafeMode(SafeModeAction.ENTER);
         dfs.saveNamespace();
-        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+        dfs.setSafeMode(SafeModeAction.LEAVE);
       }
 
       // Ensure directories exist after restart
@@ -216,9 +216,9 @@ public class TestRollingUpgrade {
       dfs.mkdirs(foo);
 
       //start rolling upgrade
-      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      dfs.setSafeMode(SafeModeAction.ENTER);
       info1 = dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
-      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+      dfs.setSafeMode(SafeModeAction.LEAVE);
       LOG.info("START\n" + info1);
 
       //query rolling upgrade
@@ -375,9 +375,9 @@ public class TestRollingUpgrade {
       final DistributedFileSystem dfs = cluster.getFileSystem();
 
       //start rolling upgrade
-      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      dfs.setSafeMode(SafeModeAction.ENTER);
       dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
-      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+      dfs.setSafeMode(SafeModeAction.LEAVE);
 
       dfs.mkdirs(bar);
       Assert.assertTrue(dfs.exists(foo));
@@ -577,13 +577,13 @@ public class TestRollingUpgrade {
       cluster.waitActive();
       DistributedFileSystem dfs = cluster.getFileSystem();
 
-      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      dfs.setSafeMode(SafeModeAction.ENTER);
       // start rolling upgrade
       dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
       queryForPreparation(dfs);
-      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      dfs.setSafeMode(SafeModeAction.ENTER);
       dfs.saveNamespace();
-      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+      dfs.setSafeMode(SafeModeAction.LEAVE);
 
       cluster.restartNameNodes();
       dfs.rollingUpgrade(RollingUpgradeAction.QUERY);
@@ -746,9 +746,9 @@ public class TestRollingUpgrade {
       snn.doCheckpoint();
 
       //start rolling upgrade
-      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      dfs.setSafeMode(SafeModeAction.ENTER);
       dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
-      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+      dfs.setSafeMode(SafeModeAction.LEAVE);
 
       dfs.mkdirs(new Path("/test/bar"));
       // do checkpoint in SNN again
@@ -24,8 +24,8 @@ import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
@ -100,9 +100,9 @@ public class TestRollingUpgradeDowngrade {
|
|||
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
|
||||
cluster.waitActive();
|
||||
DistributedFileSystem fs = cluster.getFileSystem();
|
||||
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
|
||||
fs.setSafeMode(SafeModeAction.ENTER);
|
||||
fs.saveNamespace();
|
||||
fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
|
||||
fs.setSafeMode(SafeModeAction.LEAVE);
|
||||
NNStorage storage = spy(cluster.getNameNode().getFSImage().getStorage());
|
||||
int futureVersion = NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1;
|
||||
doReturn(futureVersion).when(storage).getServiceLayoutVersion();
|
||||
|
|
|
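The import-level pattern above repeats in most of the files that follow: the nested HdfsConstants.SafeModeAction import is dropped, the top-level enum from hadoop-common is added, and HdfsConstants itself survives wherever other nested types such as RollingUpgradeAction are still referenced. Side by side, the call-site change (fs here stands for any DistributedFileSystem handle):

    // Before: HDFS-private nested enum
    fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);

    // After: filesystem-neutral enum from org.apache.hadoop.fs
    fs.setSafeMode(SafeModeAction.ENTER);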
@@ -22,8 +22,8 @@ import java.util.List;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
 import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
@@ -102,10 +102,10 @@ public class TestRollingUpgradeRollback {
     dfs.mkdirs(foo);

     // start rolling upgrade
-    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    dfs.setSafeMode(SafeModeAction.ENTER);
     Assert.assertEquals(0,
         dfsadmin.run(new String[] { "-rollingUpgrade", "prepare" }));
-    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    dfs.setSafeMode(SafeModeAction.LEAVE);
     // create new directory
     dfs.mkdirs(bar);

@@ -164,10 +164,10 @@ public class TestRollingUpgradeRollback {
     dfs.mkdirs(foo);

     // start rolling upgrade
-    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    dfs.setSafeMode(SafeModeAction.ENTER);
     Assert.assertEquals(0,
         dfsadmin.run(new String[] { "-rollingUpgrade", "prepare" }));
-    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    dfs.setSafeMode(SafeModeAction.LEAVE);
     // create new directory
     dfs.mkdirs(bar);
     dfs.close();
@@ -36,12 +36,12 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -134,10 +134,10 @@ public class TestSafeMode {
     dfs = cluster.getFileSystem();

     assertTrue("No datanode is started. Should be in SafeMode",
-        dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
+        dfs.setSafeMode(SafeModeAction.GET));

     // manually set safemode.
-    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    dfs.setSafeMode(SafeModeAction.ENTER);

     // now bring up the datanode and wait for it to be active.
     cluster.startDataNodes(conf, 1, true, null, null);
@@ -149,9 +149,9 @@ public class TestSafeMode {
     } catch (InterruptedException ignored) {}

     assertTrue("should still be in SafeMode",
-        dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
+        dfs.setSafeMode(SafeModeAction.GET));
     assertFalse("should not be in SafeMode",
-        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
+        dfs.setSafeMode(SafeModeAction.LEAVE));
   }

   /**
@@ -311,7 +311,7 @@ public class TestSafeMode {
     final Path file1 = new Path("/file1");
     DFSTestUtil.createFile(fs, file1, 1024, (short)1, 0);
     assertTrue("Could not enter SM",
-        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER));
+        dfs.setSafeMode(SafeModeAction.ENTER));
     try {
       FSRun fsRun = new FSRun() {
         @Override
@@ -339,10 +339,10 @@ public class TestSafeMode {
       InterruptedException {
     final Path file1 = new Path("/file1");

-    assertFalse(dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
+    assertFalse(dfs.setSafeMode(SafeModeAction.GET));
     DFSTestUtil.createFile(fs, file1, 1024, (short)1, 0);
     assertTrue("Could not enter SM",
-        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER));
+        dfs.setSafeMode(SafeModeAction.ENTER));

     runFsFun("Set quota while in SM", new FSRun() {
       @Override
@@ -492,7 +492,7 @@ public class TestSafeMode {
     }

     assertFalse("Could not leave SM",
-        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
+        dfs.setSafeMode(SafeModeAction.LEAVE));
   }

   /**
@@ -536,11 +536,11 @@ public class TestSafeMode {
     dfs = cluster.getFileSystem();

     // Enter safemode.
-    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    dfs.setSafeMode(SafeModeAction.ENTER);
     assertTrue("State was expected to be in safemode.", dfs.isInSafeMode());

     // Exit safemode.
-    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    dfs.setSafeMode(SafeModeAction.LEAVE);
     assertFalse("State was expected to be out of safemode.", dfs.isInSafeMode());
   }

@@ -561,11 +561,11 @@ public class TestSafeMode {
     NameNode namenode = cluster.getNameNode();

     // manually set safemode.
-    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    dfs.setSafeMode(SafeModeAction.ENTER);
     assertTrue("should still be in SafeMode", namenode.isInSafeMode());
     // getBlock locations should still work since block locations exists
     checkGetBlockLocationsWorks(fs, file1);
-    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    dfs.setSafeMode(SafeModeAction.LEAVE);
     assertFalse("should not be in SafeMode", namenode.isInSafeMode());


@@ -595,7 +595,7 @@ public class TestSafeMode {
     }


-    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    dfs.setSafeMode(SafeModeAction.LEAVE);
     assertFalse("Should not be in safemode", namenode.isInSafeMode());
     checkGetBlockLocationsWorks(fs, file1);

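The TestSafeMode hunks also depend on the return value, which the new interface preserves: setSafeMode() reports the safe-mode state resulting from the action, so GET is a pure query and the LEAVE call doubles as an assertion that safe mode was actually exited, exactly as the assertTrue/assertFalse pairs above rely on. In sketch form:

    boolean inSafeMode = dfs.setSafeMode(SafeModeAction.GET);   // query only, no transition
    boolean afterEnter = dfs.setSafeMode(SafeModeAction.ENTER); // true once in safe mode
    boolean afterLeave = dfs.setSafeMode(SafeModeAction.LEAVE); // false once left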
@@ -82,6 +82,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -1478,9 +1479,9 @@ public class TestBalancer {
     // Run balancer
     final BalancerParameters p = BalancerParameters.DEFAULT;

-    fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     fs.rollingUpgrade(HdfsConstants.RollingUpgradeAction.PREPARE);
-    fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+    fs.setSafeMode(SafeModeAction.LEAVE);

     // Rolling upgrade should abort the balancer
     assertEquals(ExitStatus.UNFINALIZED_UPGRADE.getExitCode(),
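The TestBalancer hunk marks the boundary of the migration: only SafeModeAction moved to hadoop-common, while RollingUpgradeAction remains nested in the HDFS-private HdfsConstants, so adjacent calls now draw their constants from different packages:

    fs.setSafeMode(SafeModeAction.ENTER);                          // hadoop-common enum
    fs.rollingUpgrade(HdfsConstants.RollingUpgradeAction.PREPARE); // still HDFS-specific
    fs.setSafeMode(SafeModeAction.LEAVE);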
@@ -31,6 +31,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -44,7 +45,6 @@ import org.apache.hadoop.hdfs.TestRollingUpgrade;
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -169,7 +169,7 @@ public class TestDataNodeRollingUpgrade {

   private void startRollingUpgrade() throws Exception {
     LOG.info("Starting rolling upgrade");
-    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     final DFSAdmin dfsadmin = new DFSAdmin(conf);
     TestRollingUpgrade.runCmd(dfsadmin, true, "-rollingUpgrade", "prepare");
     triggerHeartBeats();
@@ -19,7 +19,7 @@
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;

 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
@@ -79,9 +79,9 @@ public class TestLazyPersistPolicy extends LazyPersistTestCase {

     makeTestFile(path, 0, true);
     // checkpoint
-    fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     fs.saveNamespace();
-    fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+    fs.setSafeMode(SafeModeAction.LEAVE);
     cluster.restartNameNode(true);

     // Stat the file and check that the lazyPersist flag is returned back.
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;

 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.hdfs.DFSStripedOutputStream;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -30,7 +31,6 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
@@ -191,9 +191,9 @@ public class TestAddStripedBlocks {

     // save namespace, restart namenode, and check
     dfs = cluster.getFileSystem();
-    dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+    dfs.setSafeMode(SafeModeAction.ENTER);
     dfs.saveNamespace();
-    dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+    dfs.setSafeMode(SafeModeAction.LEAVE);
     cluster.restartNameNode(true);
     fsdir = cluster.getNamesystem().getFSDirectory();
     fileNode = fsdir.getINode4Write(file.toString()).asFile();
@@ -55,6 +55,7 @@ import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.FsTracer;
 import org.apache.hadoop.fs.InvalidRequestException;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil;
@@ -75,7 +76,6 @@ import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolStats;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
@@ -680,9 +680,9 @@ public class TestCacheDirectives {
         .setPath(new Path("/image")).setPool(imagePool).build());

     // Save a new image to force a fresh fsimage download
-    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    dfs.setSafeMode(SafeModeAction.ENTER);
     dfs.saveNamespace();
-    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    dfs.setSafeMode(SafeModeAction.LEAVE);

     // Checkpoint again forcing a reload of FSN state
     boolean fetchImage = secondary.doCheckpoint();
@@ -24,11 +24,11 @@ import static org.junit.Assert.fail;
 import java.io.IOException;

 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
@@ -95,7 +95,7 @@ public class TestCheckPointForSecurityTokens {
     }

     // Saving image in safe mode should succeed
-    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     try {
       admin.run(args);
     } catch(Exception e) {
@@ -1148,6 +1148,7 @@ public class TestCheckpoint {
    * Tests save namespace.
    */
   @Test
+  @SuppressWarnings("deprecation")
   public void testSaveNamespace() throws IOException {
     MiniDFSCluster cluster = null;
     DistributedFileSystem fs = null;
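TestCheckpoint takes the other available route: rather than migrating, it adds @SuppressWarnings("deprecation"), which suggests the old HdfsConstants.SafeModeAction path is deprecated but kept for compatibility. A sketch of that pattern, with the test body elided:

    @Test
    @SuppressWarnings("deprecation") // deliberately keeps exercising the old enum
    public void testSaveNamespace() throws IOException {
      // ... calls the deprecated HdfsConstants.SafeModeAction variants ...
    }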
@@ -59,6 +59,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -66,8 +67,6 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
@@ -140,9 +139,9 @@ public class TestFSImage {
         .of(SyncFlag.UPDATE_LENGTH));

     // checkpoint
-    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     fs.saveNamespace();
-    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    fs.setSafeMode(SafeModeAction.LEAVE);

     cluster.restartNameNode();
     cluster.waitActive();
@@ -340,9 +339,9 @@ public class TestFSImage {
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       DistributedFileSystem fs = cluster.getFileSystem();
-      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      fs.setSafeMode(SafeModeAction.ENTER);
       fs.saveNamespace();
-      fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+      fs.setSafeMode(SafeModeAction.LEAVE);
       File currentDir = FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(
           0);
       File fsimage = FSImageTestUtil.findNewestImageFile(currentDir
@@ -382,9 +381,9 @@ public class TestFSImage {
     long atimeLink = hdfs.getFileLinkStatus(link).getAccessTime();

     // save namespace and restart cluster
-    hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+    hdfs.setSafeMode(SafeModeAction.ENTER);
     hdfs.saveNamespace();
-    hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+    hdfs.setSafeMode(SafeModeAction.LEAVE);
     cluster.shutdown();
     cluster = new MiniDFSCluster.Builder(conf).format(false)
         .numDataNodes(1).build();
@@ -503,9 +502,9 @@ public class TestFSImage {
     DFSTestUtil.writeFile(fs, file_3_2, new String(bytes));

     // Save namespace and restart NameNode
-    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     fs.saveNamespace();
-    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    fs.setSafeMode(SafeModeAction.LEAVE);

     cluster.restartNameNodes();
     fs = cluster.getFileSystem();
@@ -782,9 +781,9 @@ public class TestFSImage {
         .of(SyncFlag.UPDATE_LENGTH));

     // checkpoint
-    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     fs.saveNamespace();
-    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    fs.setSafeMode(SafeModeAction.LEAVE);

     cluster.restartNameNode();
     cluster.waitActive();
@@ -841,9 +840,9 @@ public class TestFSImage {
     DFSTestUtil.enableAllECPolicies(fs);

     // Save namespace and restart NameNode
-    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     fs.saveNamespace();
-    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    fs.setSafeMode(SafeModeAction.LEAVE);

     cluster.restartNameNodes();
     cluster.waitActive();
@@ -864,9 +863,9 @@ public class TestFSImage {
     newPolicy = ret[0].getPolicy();

     // Save namespace and restart NameNode
-    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     fs.saveNamespace();
-    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    fs.setSafeMode(SafeModeAction.LEAVE);

     cluster.restartNameNodes();
     cluster.waitActive();
@@ -912,9 +911,9 @@ public class TestFSImage {


     // Save namespace and restart NameNode
-    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     fs.saveNamespace();
-    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    fs.setSafeMode(SafeModeAction.LEAVE);

     cluster.restartNameNodes();
     cluster.waitActive();
@@ -934,9 +933,9 @@ public class TestFSImage {
     // 2. Disable an erasure coding policy
     fs.disableErasureCodingPolicy(ecPolicy.getName());
     // Save namespace and restart NameNode
-    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     fs.saveNamespace();
-    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    fs.setSafeMode(SafeModeAction.LEAVE);

     cluster.restartNameNodes();
     cluster.waitActive();
@@ -972,9 +971,9 @@ public class TestFSImage {

     fs.removeErasureCodingPolicy(ecPolicy.getName());
     // Save namespace and restart NameNode
-    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     fs.saveNamespace();
-    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    fs.setSafeMode(SafeModeAction.LEAVE);

     cluster.restartNameNodes();
     cluster.waitActive();
@@ -27,13 +27,12 @@ import java.util.List;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -82,9 +81,9 @@ public class TestFSImageWithAcl {
     fs.removeAcl(p);

     if (persistNamespace) {
-      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      fs.setSafeMode(SafeModeAction.ENTER);
       fs.saveNamespace();
-      fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+      fs.setSafeMode(SafeModeAction.LEAVE);
     }

     cluster.restartNameNode();
@@ -249,9 +248,9 @@ public class TestFSImageWithAcl {
   private void restart(DistributedFileSystem fs, boolean persistNamespace)
       throws IOException {
     if (persistNamespace) {
-      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      fs.setSafeMode(SafeModeAction.ENTER);
       fs.saveNamespace();
-      fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+      fs.setSafeMode(SafeModeAction.LEAVE);
     }

     cluster.restartNameNode();
@@ -21,6 +21,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -28,7 +29,6 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DiffList;
@@ -185,9 +185,9 @@ public class TestFSImageWithSnapshot {
     hdfs = cluster.getFileSystem();

     // save namespace and restart cluster
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.setSafeMode(SafeModeAction.ENTER);
     hdfs.saveNamespace();
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    hdfs.setSafeMode(SafeModeAction.LEAVE);
     cluster.shutdown();
     cluster = new MiniDFSCluster.Builder(conf).format(false)
         .numDataNodes(NUM_DATANODES).build();
@@ -212,9 +212,9 @@ public class TestFSImageWithSnapshot {
     assertEquals(root, sdirs[0].getFullPath());

     // save namespace and restart cluster
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.setSafeMode(SafeModeAction.ENTER);
     hdfs.saveNamespace();
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    hdfs.setSafeMode(SafeModeAction.LEAVE);
     cluster.shutdown();
     cluster = new MiniDFSCluster.Builder(conf).format(false)
         .numDataNodes(NUM_DATANODES).build();
@@ -406,9 +406,9 @@ public class TestFSImageWithSnapshot {
     out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));

     // save namespace and restart cluster
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.setSafeMode(SafeModeAction.ENTER);
     hdfs.saveNamespace();
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    hdfs.setSafeMode(SafeModeAction.LEAVE);

     cluster.shutdown();
     cluster = new MiniDFSCluster.Builder(conf).format(false)
@@ -430,9 +430,9 @@ public class TestFSImageWithSnapshot {
     out.close();

     // save namespace
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.setSafeMode(SafeModeAction.ENTER);
     hdfs.saveNamespace();
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    hdfs.setSafeMode(SafeModeAction.LEAVE);

     // append to the empty file
     out = hdfs.append(file);
@@ -500,9 +500,9 @@ public class TestFSImageWithSnapshot {
     hdfs = cluster.getFileSystem();

     // save namespace to fsimage
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.setSafeMode(SafeModeAction.ENTER);
     hdfs.saveNamespace();
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    hdfs.setSafeMode(SafeModeAction.LEAVE);

     cluster.shutdown();
     cluster = new MiniDFSCluster.Builder(conf).format(false)
@@ -582,9 +582,9 @@ public class TestFSImageWithSnapshot {
   void restartCluster() throws Exception {
     final File before = dumpTree2File("before.txt");

-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.setSafeMode(SafeModeAction.ENTER);
     hdfs.saveNamespace();
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    hdfs.setSafeMode(SafeModeAction.LEAVE);

     cluster.shutdown();
     cluster = new MiniDFSCluster.Builder(conf).format(false)
@@ -638,9 +638,9 @@ public class TestFSImageWithSnapshot {
     hdfs.delete(renamePath1, true);
     hdfs.deleteSnapshot(dir1, "s1");
     // save namespace and restart cluster
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.setSafeMode(SafeModeAction.ENTER);
     hdfs.saveNamespace();
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    hdfs.setSafeMode(SafeModeAction.LEAVE);
     cluster.shutdown();
     cluster = new MiniDFSCluster.Builder(conf).format(false)
         .numDataNodes(NUM_DATANODES).build();
@@ -24,11 +24,12 @@ import java.util.Map;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -123,9 +124,9 @@ public class TestFSImageWithXAttr {
   private void restart(DistributedFileSystem fs, boolean persistNamespace)
       throws IOException {
     if (persistNamespace) {
-      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      fs.setSafeMode(SafeModeAction.ENTER);
       fs.saveNamespace();
-      fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+      fs.setSafeMode(SafeModeAction.LEAVE);
     }

     cluster.restartNameNode();
@@ -46,6 +46,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.AppendTestUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -55,7 +56,6 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
@@ -941,9 +941,9 @@ public class TestFileTruncate {
   @Test
   public void testTruncateEditLogLoad() throws IOException {
     // purge previously accumulated edits
-    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     fs.saveNamespace();
-    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    fs.setSafeMode(SafeModeAction.LEAVE);

     int startingFileSize = 2 * BLOCK_SIZE + BLOCK_SIZE / 2;
     int toTruncate = 1;
@@ -1039,7 +1039,7 @@ public class TestFileTruncate {
     assertFileLength(snapshotFile, startingFileSize);

     // empty edits and restart
-    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     fs.saveNamespace();
     cluster.restartNameNode(true);
     assertThat("Total block count should be unchanged from start-up",
@@ -1234,10 +1234,10 @@ public class TestFileTruncate {
     final DFSAdmin dfsadmin = new DFSAdmin(cluster.getConfiguration(0));
     DistributedFileSystem dfs = cluster.getFileSystem();
     //start rolling upgrade
-    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    dfs.setSafeMode(SafeModeAction.ENTER);
     int status = dfsadmin.run(new String[]{"-rollingUpgrade", "prepare"});
     assertEquals("could not prepare for rolling upgrade", 0, status);
-    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    dfs.setSafeMode(SafeModeAction.LEAVE);

     Path dir = new Path("/testTruncateWithRollingUpgrade");
     fs.mkdirs(dir);
@@ -25,6 +25,7 @@ import static org.junit.Assert.assertTrue;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -32,7 +33,6 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -164,9 +164,9 @@ public class TestLeaseManager {
         file.getFileUnderConstructionFeature().getClientName(), file);

     // Save a fsimage.
-    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    dfs.setSafeMode(SafeModeAction.ENTER);
     cluster.getNameNodeRpc().saveNamespace(0,0);
-    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    dfs.setSafeMode(SafeModeAction.LEAVE);

     // Restart the namenode.
     cluster.restartNameNode(true);
@@ -20,8 +20,8 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.IOException;

-import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.ipc.metrics.RetryCacheMetrics;
 import org.apache.hadoop.conf.Configuration;
@@ -106,9 +106,9 @@ public class TestNameNodeRetryCacheMetrics {
   }

   private void trySaveNamespace() throws IOException {
-    filesystem.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+    filesystem.setSafeMode(SafeModeAction.ENTER);
     filesystem.saveNamespace();
-    filesystem.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+    filesystem.setSafeMode(SafeModeAction.LEAVE);
   }

 }
@@ -24,13 +24,13 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.log4j.Level;
@@ -133,9 +133,9 @@ public class TestNestedEncryptionZones {

     // Checkpoint and restart NameNode, to test if nested EZs can be loaded
     // from fsimage
-    fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     fs.saveNamespace();
-    fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+    fs.setSafeMode(SafeModeAction.LEAVE);
     cluster.restartNameNodes();
     cluster.waitActive();
     fs = cluster.getFileSystem();
@@ -26,6 +26,7 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -723,9 +724,9 @@ public class TestQuotaByStorageType {
     assertEquals(file1Len, ssdConsumed);

     // Restart the namenode with checkpoint to make sure fsImage is correct
-    dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+    dfs.setSafeMode(SafeModeAction.ENTER);
     dfs.saveNamespace();
-    dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+    dfs.setSafeMode(SafeModeAction.LEAVE);
     cluster.restartNameNode(true);
     refreshClusterState();

@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.FileSystemTestWrapper;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -51,7 +52,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.ReencryptionStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
@@ -538,9 +538,9 @@ public class TestReencryption {

     final FileEncryptionInfo fei0new = getFileEncryptionInfo(encFile0);
     final FileEncryptionInfo fei9new = getFileEncryptionInfo(encFile9);
-    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     fs.saveNamespace();
-    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    fs.setSafeMode(SafeModeAction.LEAVE);
     restartClusterDisableReencrypt();

     assertKeyVersionEquals(encFile0, fei0new);
@@ -651,9 +651,9 @@ public class TestReencryption {
     dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
     waitForQueuedZones(1);

-    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     fs.saveNamespace();
-    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    fs.setSafeMode(SafeModeAction.LEAVE);

     // verify after loading from fsimage the command is loaded
     restartClusterDisableReencrypt();
@@ -716,9 +716,9 @@ public class TestReencryption {
     }

     // Verify the same is true after loading from FSImage
-    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     fs.saveNamespace();
-    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    fs.setSafeMode(SafeModeAction.LEAVE);

     restartClusterDisableReencrypt();
     waitForQueuedZones(numZones - cancelled.size());
@@ -1714,7 +1714,7 @@ public class TestReencryption {
     dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
     waitForReencryptedFiles(zone.toString(), 5);

-    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     getEzManager().resumeReencryptForTesting();
     for (int i = 0; i < 3; ++i) {
       Thread.sleep(1000);
@@ -1727,7 +1727,7 @@ public class TestReencryption {
       assertEquals(5, zs.getFilesReencrypted());
     }

-    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    fs.setSafeMode(SafeModeAction.LEAVE);
     // trigger the background thread to run, without having to
     // wait for DFS_NAMENODE_REENCRYPT_SLEEP_INTERVAL_KEY
     getHandler().notifyNewSubmission();
@@ -47,6 +47,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
@@ -55,7 +56,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@@ -212,7 +213,7 @@ public class TestSaveNamespace {
     doAnEdit(fsn, 1);

     // Save namespace - this may fail, depending on fault injected
-    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fsn.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
     try {
       fsn.saveNamespace(0, 0);
       if (shouldFail) {
@@ -226,7 +227,7 @@ public class TestSaveNamespace {
       }
     }

-    fsn.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    fsn.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
     // Should still be able to perform edits
     doAnEdit(fsn, 2);

@@ -281,7 +282,7 @@ public class TestSaveNamespace {

     try {
       doAnEdit(fsn, 1);
-      fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      fsn.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);

       // Save namespace - should mark the first storage dir as faulty
       // since it's not traversable.
@@ -420,7 +421,7 @@ public class TestSaveNamespace {
     doAnEdit(fsn, 1);

     // Save namespace
-    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fsn.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
     try {
       fsn.saveNamespace(0, 0);
       fail("saveNamespace did not fail even when all directories failed!");
@@ -469,7 +470,7 @@ public class TestSaveNamespace {
     doAnEdit(fsn, 2);

     // Save namespace
-    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fsn.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
     fsn.saveNamespace(0, 0);

     // Now shut down and restart the NN
@@ -503,7 +504,7 @@ public class TestSaveNamespace {
     doAnEdit(fsn, 1);
     assertEquals(2, fsn.getEditLog().getLastWrittenTxId());

-    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fsn.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
     fsn.saveNamespace(0, 0);

     // 2 more txns: END the first segment, BEGIN a new one
@@ -560,7 +561,7 @@ public class TestSaveNamespace {
     final Canceler canceler = new Canceler();

     // Save namespace
-    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fsn.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
     try {
       Future<Void> saverFuture = pool.submit(new Callable<Void>() {
         @Override
@@ -628,9 +629,9 @@ public class TestSaveNamespace {
       out = fs.create(new Path("/test-source/foo")); // don't close
       fs.rename(new Path("/test-source/"), new Path("/test-target/"));

-      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      fs.setSafeMode(SafeModeAction.ENTER);
       cluster.getNameNodeRpc().saveNamespace(0, 0);
-      fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+      fs.setSafeMode(SafeModeAction.LEAVE);
     } finally {
       IOUtils.cleanupWithLogger(LOG, out, fs);
       cluster.shutdown();
@@ -646,9 +647,9 @@ public class TestSaveNamespace {
     try {
       cluster.getNamesystem().leaseManager.addLease("me",
           INodeId.ROOT_INODE_ID + 1);
-      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      fs.setSafeMode(SafeModeAction.ENTER);
      cluster.getNameNodeRpc().saveNamespace(0, 0);
-      fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+      fs.setSafeMode(SafeModeAction.LEAVE);
     } finally {
       cluster.shutdown();
     }
@@ -678,9 +679,9 @@ public class TestSaveNamespace {
         file.getFileWithSnapshotFeature().getDiffs() != null);

     // saveNamespace
-    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     cluster.getNameNodeRpc().saveNamespace(0, 0);
-    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    fs.setSafeMode(SafeModeAction.LEAVE);

     // restart namenode
     cluster.restartNameNode(true);
@@ -708,7 +709,7 @@ public class TestSaveNamespace {
     final FSImage fsimage = cluster.getNameNode().getFSImage();
     final long before = fsimage.getStorage().getMostRecentCheckpointTxId();

-    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     // set the timewindow to 1 hour and tx gap to 1000, which means that if
     // there is a checkpoint during the past 1 hour or the tx number happening
     // after the latest checkpoint is <= 1000, this saveNamespace request
@@ -723,14 +724,14 @@ public class TestSaveNamespace {
     // do another checkpoint. this time set the timewindow to 1s
     // we should see a new checkpoint
     cluster.getNameNodeRpc().saveNamespace(1, 1000);
-    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    fs.setSafeMode(SafeModeAction.LEAVE);

     after = fsimage.getStorage().getMostRecentCheckpointTxId();
     Assert.assertTrue(after > before);

     fs.mkdirs(new Path("/foo/bar/baz")); // 3 new tx

-    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.setSafeMode(SafeModeAction.ENTER);
     cluster.getNameNodeRpc().saveNamespace(3600, 5); // 3 + end/start segment
     long after2 = fsimage.getStorage().getMostRecentCheckpointTxId();
     // no checkpoint should be made
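TestSaveNamespace shows the split between the two APIs most clearly: the NameNode-internal FSNamesystem.setSafeMode() keeps its old signature, so its call sites are merely re-qualified via the new HdfsConstants import, while the FileSystem-level calls migrate to the hadoop-common enum. Both idioms from the hunks above, side by side:

    // Server-side API: unchanged, now fully qualified.
    fsn.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
    fsn.saveNamespace(0, 0);
    fsn.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);

    // Client-facing API: migrated to org.apache.hadoop.fs.SafeModeAction.
    fs.setSafeMode(SafeModeAction.ENTER);
    cluster.getNameNodeRpc().saveNamespace(0, 0);
    fs.setSafeMode(SafeModeAction.LEAVE);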
@@ -22,12 +22,12 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.io.IOUtils;
 import org.junit.Test;
@@ -157,7 +157,7 @@ public class TestHAMetrics {

     // Save fsimage so that nn does not build up namesystem by replaying
     // edits, but load from the image.
-    ((DistributedFileSystem)fs).setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    ((DistributedFileSystem)fs).setSafeMode(SafeModeAction.ENTER);
     ((DistributedFileSystem)fs).saveNamespace();

     // Flip the two namenodes and restart the standby, which will load
@@ -44,6 +44,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.ServiceFailedException;
@@ -53,7 +54,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
@@ -312,7 +312,7 @@ public class TestObserverNode {
     assertSentTo(2);

     // Set observer to safe mode.
-    dfsCluster.getFileSystem(2).setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    dfsCluster.getFileSystem(2).setSafeMode(SafeModeAction.ENTER);

     // Mock block manager for observer to generate some fake blocks which
     // will trigger the (retriable) safe mode exception.
@@ -335,7 +335,7 @@ public class TestObserverNode {
     Mockito.reset(bmSpy);

     // Remove safe mode on observer, request should still go to it.
-    dfsCluster.getFileSystem(2).setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    dfsCluster.getFileSystem(2).setSafeMode(SafeModeAction.LEAVE);
     dfs.open(testPath).close();
     assertSentTo(2);
   }
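The TestObserverNode hunks are a reminder that safe mode is per-NameNode state: only the observer (index 2 in the MiniQJMHACluster) is flipped, so reads routed to it hit the retriable safe-mode path while the active NameNode keeps serving:

    dfsCluster.getFileSystem(2).setSafeMode(SafeModeAction.ENTER); // observer only
    // ... reads sent to the observer now raise a retriable safe-mode exception ...
    dfsCluster.getFileSystem(2).setSafeMode(SafeModeAction.LEAVE);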
@@ -23,11 +23,11 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -582,9 +582,9 @@ public class TestRandomOpsWithSnapshots {
     // Randomly decide whether we want to do a check point
     if (generator.nextBoolean()) {
       LOG.info("checkClusterHealth, doing a checkpoint on NN.");
-      hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+      hdfs.setSafeMode(SafeModeAction.ENTER);
       hdfs.saveNamespace();
-      hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+      hdfs.setSafeMode(SafeModeAction.LEAVE);
     }

   /** Restart name node making sure loading from image successfully */
@@ -24,11 +24,11 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.*;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
@@ -560,9 +560,9 @@ public class TestRenameWithSnapshots {
     SnapshotTestHelper.dumpTree2File(fsdir, fsnMiddle);

     // save namespace and restart cluster
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.setSafeMode(SafeModeAction.ENTER);
     hdfs.saveNamespace();
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    hdfs.setSafeMode(SafeModeAction.LEAVE);
     cluster.shutdown();
     cluster = new MiniDFSCluster.Builder(conf).format(false)
         .numDataNodes(REPL).build();
@@ -1809,9 +1809,9 @@ public class TestRenameWithSnapshots {
     // correct. Note that when loading fsimage, foo and bar will be converted
     // back to normal INodeDirectory and INodeFile since they do not store any
     // snapshot data
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.setSafeMode(SafeModeAction.ENTER);
     hdfs.saveNamespace();
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    hdfs.setSafeMode(SafeModeAction.LEAVE);
     cluster.shutdown();
     cluster = new MiniDFSCluster.Builder(conf).format(false)
         .numDataNodes(REPL).build();
@@ -2483,9 +2483,9 @@ public class TestRenameWithSnapshots {
     deleteSnapshot(sub1, snap6);
     deleteSnapshot(sub1, snap3);
     // save namespace and restart Namenode
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.setSafeMode(SafeModeAction.ENTER);
     hdfs.saveNamespace();
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    hdfs.setSafeMode(SafeModeAction.LEAVE);
     cluster.restartNameNode(true);
   }

@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -47,7 +48,6 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -207,9 +207,9 @@ public class TestSnapshot {
     SnapshotTestHelper.dumpTree2File(fsdir, fsnMiddle);

     // save namespace and restart cluster
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.setSafeMode(SafeModeAction.ENTER);
     hdfs.saveNamespace();
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    hdfs.setSafeMode(SafeModeAction.LEAVE);
     cluster.shutdown();
     cluster = new MiniDFSCluster.Builder(conf).format(false)
         .numDataNodes(REPLICATION).build();
@@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -40,7 +41,6 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -166,9 +166,9 @@ public class TestSnapshotDeletion {
         .getNumSnapshottableDirs());
     assertEquals(0, cluster.getNamesystem().getSnapshotManager()
         .getSnapshottableDirs().length);
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.setSafeMode(SafeModeAction.ENTER);
     hdfs.saveNamespace();
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    hdfs.setSafeMode(SafeModeAction.LEAVE);
     cluster.restartNameNode(0);
   }

@@ -1141,10 +1141,10 @@ public class TestSnapshotDeletion {
     hdfs.allowSnapshot(foo);

     hdfs.createSnapshot(foo, snapshotName);
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.setSafeMode(SafeModeAction.ENTER);
     hdfs.saveNamespace();

-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    hdfs.setSafeMode(SafeModeAction.LEAVE);
     hdfs.deleteSnapshot(foo, snapshotName);
     hdfs.delete(bar, true);
     hdfs.delete(foo, true);
@@ -1185,9 +1185,9 @@ public class TestSnapshotDeletion {
     hdfs.deleteSnapshot(st, "s1");
     hdfs.deleteSnapshot(st, "s2");

-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.setSafeMode(SafeModeAction.ENTER);
     hdfs.saveNamespace();
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    hdfs.setSafeMode(SafeModeAction.LEAVE);

     cluster.restartNameNodes();
   }
@@ -1270,9 +1270,9 @@ public class TestSnapshotDeletion {
     exception.expectMessage(error);
     hdfs.concat(dest, files);

-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.setSafeMode(SafeModeAction.ENTER);
     hdfs.saveNamespace();
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    hdfs.setSafeMode(SafeModeAction.LEAVE);

     cluster.restartNameNodes();
   }
@@ -1307,9 +1307,9 @@ public class TestSnapshotDeletion {

     hdfs.deleteSnapshot(st, "s1");

-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.setSafeMode(SafeModeAction.ENTER);
     hdfs.saveNamespace();
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    hdfs.setSafeMode(SafeModeAction.LEAVE);

     cluster.restartNameNodes();
   }
@@ -1351,9 +1351,9 @@ public class TestSnapshotDeletion {

     hdfs.deleteSnapshot(st, "s1");

-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.setSafeMode(SafeModeAction.ENTER);
     hdfs.saveNamespace();
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    hdfs.setSafeMode(SafeModeAction.LEAVE);

     cluster.restartNameNodes();
   }
@@ -28,13 +28,12 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -156,9 +155,9 @@ public class TestXAttrWithSnapshot {
     hdfs.getSnapshotDiffReport(path, snapshotName, "");
     System.out.println(report);
     Assert.assertEquals(0, report.getDiffList().size());
-    hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+    hdfs.setSafeMode(SafeModeAction.ENTER);
     hdfs.saveNamespace();
-    hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+    hdfs.setSafeMode(SafeModeAction.LEAVE);
     cluster.restartNameNode(true);
     report =
         hdfs.getSnapshotDiffReport(path, snapshotName, "");
@@ -88,6 +88,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;

@@ -96,7 +97,6 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;

@@ -327,9 +327,9 @@ public class TestOfflineImageViewer {
     filesECCount++;

     // Write results to the fsimage file
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
+    hdfs.setSafeMode(SafeModeAction.ENTER, false);
     hdfs.saveNamespace();
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
+    hdfs.setSafeMode(SafeModeAction.LEAVE, false);

     // Determine location of fsimage file
     originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil

@@ -897,7 +897,7 @@ public class TestOfflineImageViewer {
     }

     // Write results to the fsimage file
-    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
+    hdfs.setSafeMode(SafeModeAction.ENTER, false);
     hdfs.saveNamespace();
     // Determine location of fsimage file
     fsimageFile =

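Note: the hunks in this file use the two-argument setSafeMode() overload. Only the enum constant changes; the boolean second argument is carried over from the old HDFS API unchanged by this patch. A sketch of the migrated call (class and method names are illustrative, not part of this change):

import java.io.IOException;

import org.apache.hadoop.fs.SafeModeAction;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative, not from this patch: the two-argument form as used above.
// The boolean flag keeps whatever semantics it had in the old HDFS API;
// this patch only swaps the enum type.
public final class FsImageWriter {
  private FsImageWriter() {
  }

  public static void writeFsImage(DistributedFileSystem hdfs)
      throws IOException {
    hdfs.setSafeMode(SafeModeAction.ENTER, false);
    hdfs.saveNamespace();
    hdfs.setSafeMode(SafeModeAction.LEAVE, false);
  }
}
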
@@ -43,11 +43,11 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.net.NetUtils;

@@ -148,7 +148,7 @@ public class TestOfflineImageViewerForAcl {
     writtenAcls.put(file.toString(), hdfs.getAclStatus(file));

     // Write results to the fsimage file
-    hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
+    hdfs.setSafeMode(SafeModeAction.ENTER, false);
     hdfs.saveNamespace();

     // Determine the location of the fsimage file

@@ -30,9 +30,9 @@ import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.net.NetUtils;

@@ -102,7 +102,7 @@ public class TestOfflineImageViewerForContentSummary {
     symLinkSummaryForDirContainsFromDFS = hdfs.getContentSummary(new Path(
         "/dirForLinks"));
     // Write results to the fsimage file
-    hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
+    hdfs.setSafeMode(SafeModeAction.ENTER, false);
     hdfs.saveNamespace();
     // Determine the location of the fsimage file
     originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil

@@ -35,11 +35,11 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.XAttrHelper;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;

@@ -80,7 +80,7 @@ public class TestOfflineImageViewerForXAttr {
     hdfs.setXAttr(dir, "user.attr1", "value1".getBytes());
     hdfs.setXAttr(dir, "user.attr2", "value2".getBytes());
     // Write results to the fsimage file
-    hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
+    hdfs.setSafeMode(SafeModeAction.ENTER, false);
     hdfs.saveNamespace();

     List<XAttr> attributes = new ArrayList<XAttr>();

@@ -26,6 +26,7 @@ import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;

@@ -33,7 +34,6 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;

@@ -121,7 +121,7 @@ public class TestOfflineImageViewerWithStripedBlocks {

   private void testFileSize(int numBytes) throws IOException,
       UnresolvedLinkException, SnapshotAccessControlException {
-    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    fs.setSafeMode(SafeModeAction.LEAVE);
     File orgFsimage = null;
     Path file = new Path("/eczone/striped");
     FSDataOutputStream out = fs.create(file, true);

@@ -130,7 +130,7 @@ public class TestOfflineImageViewerWithStripedBlocks {
     out.close();

     // Write results to the fsimage file
-    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
+    fs.setSafeMode(SafeModeAction.ENTER, false);
     fs.saveNamespace();

     // Determine location of fsimage file

@@ -32,11 +32,11 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo;

@@ -87,9 +87,9 @@ public class TestHistoryFileManager {
   public void cleanTest() throws Exception {
     new File(coreSitePath).delete();
     dfsCluster.getFileSystem().setSafeMode(
-        HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+        SafeModeAction.LEAVE);
     dfsCluster2.getFileSystem().setSafeMode(
-        HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+        SafeModeAction.LEAVE);
   }

   private String getDoneDirNameForTest() {

@@ -119,7 +119,7 @@ public class TestHistoryFileManager {
   @Test
   public void testCreateDirsWithFileSystem() throws Exception {
     dfsCluster.getFileSystem().setSafeMode(
-        HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+        SafeModeAction.LEAVE);
     Assert.assertFalse(dfsCluster.getFileSystem().isInSafeMode());
     testTryCreateHistoryDirs(dfsCluster.getConfiguration(0), true);
   }

@@ -127,9 +127,9 @@ public class TestHistoryFileManager {
   @Test
   public void testCreateDirsWithAdditionalFileSystem() throws Exception {
     dfsCluster.getFileSystem().setSafeMode(
-        HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+        SafeModeAction.LEAVE);
     dfsCluster2.getFileSystem().setSafeMode(
-        HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+        SafeModeAction.LEAVE);
     Assert.assertFalse(dfsCluster.getFileSystem().isInSafeMode());
     Assert.assertFalse(dfsCluster2.getFileSystem().isInSafeMode());

@@ -157,7 +157,7 @@ public class TestHistoryFileManager {
   @Test
   public void testCreateDirsWithFileSystemInSafeMode() throws Exception {
     dfsCluster.getFileSystem().setSafeMode(
-        HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+        SafeModeAction.ENTER);
     Assert.assertTrue(dfsCluster.getFileSystem().isInSafeMode());
     testTryCreateHistoryDirs(dfsCluster.getConfiguration(0), false);
   }

@@ -175,7 +175,7 @@ public class TestHistoryFileManager {
   public void testCreateDirsWithFileSystemBecomingAvailBeforeTimeout()
       throws Exception {
     dfsCluster.getFileSystem().setSafeMode(
-        HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+        SafeModeAction.ENTER);
     Assert.assertTrue(dfsCluster.getFileSystem().isInSafeMode());
     new Thread() {
       @Override

@@ -183,7 +183,7 @@ public class TestHistoryFileManager {
         try {
           Thread.sleep(500);
           dfsCluster.getFileSystem().setSafeMode(
-              HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+              SafeModeAction.LEAVE);
           Assert.assertTrue(dfsCluster.getFileSystem().isInSafeMode());
         } catch (Exception ex) {
           Assert.fail(ex.toString());

@@ -198,7 +198,7 @@ public class TestHistoryFileManager {
   public void testCreateDirsWithFileSystemNotBecomingAvailBeforeTimeout()
       throws Exception {
     dfsCluster.getFileSystem().setSafeMode(
-        HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+        SafeModeAction.ENTER);
     Assert.assertTrue(dfsCluster.getFileSystem().isInSafeMode());
     final ControlledClock clock = new ControlledClock();
     clock.setTime(1);

@@ -19,10 +19,10 @@
 package org.apache.hadoop.mapreduce.v2.hs;

 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.SafeModeAction;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;

 import org.junit.Assert;
 import org.junit.Test;

@@ -57,7 +57,7 @@ public class TestHistoryFileManagerInitWithNonRunningDFS {
     try {
       // set up a cluster with its name node in safe mode
      dfsCluster.getFileSystem().setSafeMode(
-          HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+          SafeModeAction.ENTER);
       Assert.assertTrue(dfsCluster.getFileSystem().isInSafeMode());

       HistoryFileManager hfm = new HistoryFileManager();
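Note: the TestHistoryFileManager hunks above assert state through isInSafeMode(). Assuming the new enum also exposes a GET action mirroring the old SAFEMODE_GET (an assumption; no GET usage appears in this diff), an equivalent query looks like the following illustrative sketch:

import java.io.IOException;

import org.apache.hadoop.fs.SafeModeAction;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative, not from this patch: query safe mode state via the new enum.
// SafeModeAction.GET is assumed to mirror the old SAFEMODE_GET action and to
// return the current state without changing it.
public final class SafeModeQuery {
  private SafeModeQuery() {
  }

  public static boolean inSafeMode(DistributedFileSystem hdfs)
      throws IOException {
    return hdfs.setSafeMode(SafeModeAction.GET);
  }
}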