HADOOP-17720. Replace Guava Sets usage by Hadoop's own Sets in HDFS (#3031)
Signed-off-by: Takanobu Asanuma <tasanuma@apache.org>
parent 2a206c20cc
commit 028ec4704b
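The change itself is mechanical: every affected HDFS module swaps the Guava (or shaded hadoop-thirdparty) Sets import for the Sets utility that hadoop-common provides, and each module's pom gains an enforcer rule so the banned imports cannot return. A minimal sketch of the import-swap pattern — illustrative, not taken from the commit, and assuming hadoop-common is on the classpath:

import java.util.Set;

// Was: import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
import org.apache.hadoop.util.Sets;

public class SetsMigrationSketch {
  public static void main(String[] args) {
    // org.apache.hadoop.util.Sets mirrors the Guava factory methods the
    // codebase relies on, so call sites compile unchanged after the swap.
    Set<String> nameservices = Sets.newHashSet("ns1", "ns2");
    System.out.println(nameservices);
  }
}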
@@ -343,6 +343,38 @@
         <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
       </configuration>
     </plugin>
+    <plugin>
+      <groupId>org.apache.maven.plugins</groupId>
+      <artifactId>maven-enforcer-plugin</artifactId>
+      <dependencies>
+        <dependency>
+          <groupId>de.skuzzle.enforcer</groupId>
+          <artifactId>restrict-imports-enforcer-rule</artifactId>
+          <version>${restrict-imports.enforcer.version}</version>
+        </dependency>
+      </dependencies>
+      <executions>
+        <execution>
+          <id>banned-illegal-imports</id>
+          <phase>process-sources</phase>
+          <goals>
+            <goal>enforce</goal>
+          </goals>
+          <configuration>
+            <rules>
+              <restrictImports implementation="de.skuzzle.enforcer.restrictimports.rule.RestrictImports">
+                <includeTestCode>true</includeTestCode>
+                <reason>Use hadoop-common provided Sets rather than Guava provided Sets</reason>
+                <bannedImports>
+                  <bannedImport>org.apache.hadoop.thirdparty.com.google.common.collect.Sets</bannedImport>
+                  <bannedImport>com.google.common.collect.Sets</bannedImport>
+                </bannedImports>
+              </restrictImports>
+            </rules>
+          </configuration>
+        </execution>
+      </executions>
+    </plugin>
   </plugins>
 </build>
 
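The maven-enforcer-plugin addition above is what keeps the cleanup from regressing: with includeTestCode set to true, any main or test source that imports either banned Sets class fails the build at the process-sources phase, reporting the configured <reason>. As a hypothetical illustration (this class is not part of the commit), a file like the following would now be rejected:

// Hypothetical file; the restrict-imports-enforcer-rule fails the build
// here because the Guava import is on the banned list.
import com.google.common.collect.Sets;   // banned by the rule above
import java.util.Set;

public class BannedImportExample {
  Set<String> peers = Sets.newHashSet("dn1", "dn2");
}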
@@ -23,9 +23,9 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
 import org.apache.hadoop.security.GroupMappingServiceProvider;
 import org.apache.hadoop.test.HadoopUsersConfTestHelper;
+import org.apache.hadoop.util.Sets;
 
 public class DummyGroupMapping implements GroupMappingServiceProvider {
@@ -305,6 +305,38 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
         </filesets>
       </configuration>
     </plugin>
+    <plugin>
+      <groupId>org.apache.maven.plugins</groupId>
+      <artifactId>maven-enforcer-plugin</artifactId>
+      <dependencies>
+        <dependency>
+          <groupId>de.skuzzle.enforcer</groupId>
+          <artifactId>restrict-imports-enforcer-rule</artifactId>
+          <version>${restrict-imports.enforcer.version}</version>
+        </dependency>
+      </dependencies>
+      <executions>
+        <execution>
+          <id>banned-illegal-imports</id>
+          <phase>process-sources</phase>
+          <goals>
+            <goal>enforce</goal>
+          </goals>
+          <configuration>
+            <rules>
+              <restrictImports implementation="de.skuzzle.enforcer.restrictimports.rule.RestrictImports">
+                <includeTestCode>true</includeTestCode>
+                <reason>Use hadoop-common provided Sets rather than Guava provided Sets</reason>
+                <bannedImports>
+                  <bannedImport>org.apache.hadoop.thirdparty.com.google.common.collect.Sets</bannedImport>
+                  <bannedImport>com.google.common.collect.Sets</bannedImport>
+                </bannedImports>
+              </restrictImports>
+            </rules>
+          </configuration>
+        </execution>
+      </executions>
+    </plugin>
   </plugins>
 </build>
 <profiles>
@@ -30,6 +30,7 @@ import org.apache.commons.lang3.ClassUtils;
 import org.apache.hadoop.hdfs.protocolPB.RouterPolicyProvider;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.security.authorize.Service;
+import org.apache.hadoop.util.Sets;
 
 import org.junit.BeforeClass;
 import org.junit.Rule;
@@ -41,8 +42,6 @@ import org.junit.runners.Parameterized.Parameters;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
-
 /**
  * Test suite covering RouterPolicyProvider. We expect that it contains a
  * security policy definition for every RPC protocol used in HDFS. The test
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hdfs.server.federation.router;
 
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
@@ -40,6 +39,7 @@ import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
+import org.apache.hadoop.util.Sets;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -437,6 +437,38 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
         </filesets>
       </configuration>
     </plugin>
+    <plugin>
+      <groupId>org.apache.maven.plugins</groupId>
+      <artifactId>maven-enforcer-plugin</artifactId>
+      <dependencies>
+        <dependency>
+          <groupId>de.skuzzle.enforcer</groupId>
+          <artifactId>restrict-imports-enforcer-rule</artifactId>
+          <version>${restrict-imports.enforcer.version}</version>
+        </dependency>
+      </dependencies>
+      <executions>
+        <execution>
+          <id>banned-illegal-imports</id>
+          <phase>process-sources</phase>
+          <goals>
+            <goal>enforce</goal>
+          </goals>
+          <configuration>
+            <rules>
+              <restrictImports implementation="de.skuzzle.enforcer.restrictimports.rule.RestrictImports">
+                <includeTestCode>true</includeTestCode>
+                <reason>Use hadoop-common provided Sets rather than Guava provided Sets</reason>
+                <bannedImports>
+                  <bannedImport>org.apache.hadoop.thirdparty.com.google.common.collect.Sets</bannedImport>
+                  <bannedImport>com.google.common.collect.Sets</bannedImport>
+                </bannedImports>
+              </restrictImports>
+            </rules>
+          </configuration>
+        </execution>
+      </executions>
+    </plugin>
   </plugins>
 </build>
 
@@ -73,6 +73,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.util.Sets;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -107,7 +108,6 @@ import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
 import org.apache.hadoop.thirdparty.protobuf.BlockingService;
 
 @InterfaceAudience.Private
@@ -23,11 +23,6 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 
-import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
-import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet;
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
-
 /**
  * LayoutFlags represent features which the FSImage and edit logs can either
  * support or not, independently of layout version.
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.qjournal.server;
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
@@ -40,6 +39,7 @@ import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.Sets;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -22,15 +22,12 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPO
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_DEFAULT;
 import static org.apache.hadoop.util.Time.monotonicNow;
 
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses;
 
 import org.apache.hadoop.fs.StorageType;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -59,7 +56,10 @@ import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.Sets;
 import org.apache.hadoop.util.Timer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import javax.annotation.Nonnull;
 import javax.annotation.Nullable;
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.datanode;
 import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.StorageType;
@@ -33,6 +32,7 @@ import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.protocol.*;
 import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
+import org.apache.hadoop.util.Sets;
 
 import org.slf4j.Logger;
 
@@ -28,12 +28,12 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.Sets;
 
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
 import org.slf4j.Logger;
 
 /**
@@ -177,8 +177,8 @@ class BlockPoolManager {
       throws IOException {
     assert Thread.holdsLock(refreshNamenodesLock);
 
-    Set<String> toRefresh = Sets.newLinkedHashSet();
-    Set<String> toAdd = Sets.newLinkedHashSet();
+    Set<String> toRefresh = new LinkedHashSet<>();
+    Set<String> toAdd = new LinkedHashSet<>();
     Set<String> toRemove;
 
     synchronized (this) {
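Note that not every call site moves to the new utility class: where a Guava factory method only saved spelling out type parameters, as with Sets.newLinkedHashSet() above, the commit substitutes the plain JDK constructor, which the diamond operator has made just as concise since Java 7. A standalone sketch of the equivalence:

import java.util.LinkedHashSet;
import java.util.Set;

public class DiamondSketch {
  public static void main(String[] args) {
    // Equivalent to the Guava form Sets.newLinkedHashSet(), minus the
    // library dependency; insertion order is preserved on iteration.
    Set<String> toRefresh = new LinkedHashSet<>();
    toRefresh.add("ns1");
    toRefresh.add("ns2");
    System.out.println(toRefresh);  // [ns1, ns2]
  }
}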
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.datanode.checker;
 
 import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.FutureCallback;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture;
@@ -37,6 +36,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.VolumeCheckContext;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+import org.apache.hadoop.util.Sets;
 import org.apache.hadoop.util.Timer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -119,12 +119,12 @@ import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.apache.hadoop.util.InstrumentedReadWriteLock;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.Sets;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Timer;
 
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -40,10 +40,11 @@ import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
+import org.apache.hadoop.util.Sets;
 
 import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
 
 /**
  * Manages a collection of Journals. None of the methods are synchronized, it is
@@ -40,7 +40,6 @@ import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ComparisonChain;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
 
 /**
  * The NNStorageRetentionManager is responsible for inspecting the storage
@@ -192,7 +191,7 @@ public class NNStorageRetentionManager {
       return 0L;
     }
 
-    TreeSet<Long> imageTxIds = Sets.newTreeSet(Collections.reverseOrder());
+    TreeSet<Long> imageTxIds = new TreeSet<>(Collections.reverseOrder());
     for (FSImageFile image : images) {
       imageTxIds.add(image.getCheckpointTxId());
     }
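The same direct-JDK substitution works when a comparator is involved: TreeSet has accepted a Comparator in its constructor since Java 1.2, so Sets.newTreeSet(Collections.reverseOrder()) needs no utility method either. A small sketch of the resulting iteration order:

import java.util.Collections;
import java.util.TreeSet;

public class ReverseOrderSketch {
  public static void main(String[] args) {
    // Descending order means the newest checkpoint transaction ID is
    // visited first, matching the retention manager's use above.
    TreeSet<Long> imageTxIds = new TreeSet<>(Collections.reverseOrder());
    imageTxIds.add(100L);
    imageTxIds.add(250L);
    System.out.println(imageTxIds);  // [250, 100]
  }
}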
@@ -21,7 +21,6 @@ import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
 
 import java.util.Set;
 import org.apache.commons.logging.Log;
@@ -93,6 +92,7 @@ import org.apache.hadoop.util.ExitUtil.ExitException;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.JvmPauseMonitor;
 import org.apache.hadoop.util.ServicePlugin;
+import org.apache.hadoop.util.Sets;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.GcTimeMonitor;
@@ -86,6 +86,7 @@ import org.apache.hadoop.hdfs.server.datanode.VolumeScanner;
 import org.apache.hadoop.hdfs.server.namenode.ImageServlet;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
+import org.apache.hadoop.util.Sets;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -148,7 +149,6 @@ import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
 
 /**
  * This class creates a single-process DFS cluster for junit testing.
@@ -25,7 +25,6 @@ import java.io.IOException;
 import java.util.*;
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
@@ -48,6 +47,7 @@ import org.apache.hadoop.net.Node;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
+import org.apache.hadoop.util.Sets;
 import org.junit.Assert;
 import static org.junit.Assert.fail;
 import org.junit.Test;
@@ -80,12 +80,11 @@ import org.apache.hadoop.security.alias.CredentialProviderFactory;
 import org.apache.hadoop.security.alias.JavaKeyStoreProvider;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
+import org.apache.hadoop.util.Sets;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
-
 public class TestDFSUtil {
 
   static final String NS1_NN_ADDR = "ns1-nn.example.com:8020";
@@ -27,12 +27,12 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
 import org.apache.commons.lang3.ClassUtils;
 import org.apache.hadoop.hdfs.qjournal.server.JournalNodeRpcServer;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.security.authorize.Service;
+import org.apache.hadoop.util.Sets;
 
 import org.junit.BeforeClass;
 import org.junit.Rule;
@@ -45,13 +45,12 @@ import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.util.Sets;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
-
 public class TestHdfsAdmin {
 
   private static final Path TEST_PATH = new Path("/test");
@@ -17,10 +17,7 @@
  */
 package org.apache.hadoop.hdfs.net;
 
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -29,10 +26,13 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.net.Node;
+import org.apache.hadoop.util.Sets;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.util.EnumMap;
 import java.util.HashMap;
@@ -37,8 +37,6 @@ import java.util.SortedSet;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
@@ -53,6 +51,7 @@ import org.apache.hadoop.hdfs.util.Holder;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.ProtobufRpcEngine2;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Sets;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
@@ -62,7 +61,8 @@ import org.mockito.stubbing.Answer;
 
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
 
 
@@ -32,10 +32,10 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology.NSConf;
+import org.apache.hadoop.util.Sets;
 import org.junit.Test;
 
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
 
 /**
  * Tests datanode refresh namenode list functionality.
@@ -45,8 +45,6 @@ import java.util.Properties;
 import java.util.Set;
 
 import org.apache.commons.lang3.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -61,6 +59,7 @@ import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.util.Holder;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.Sets;
 import org.mockito.Mockito;
 
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
@@ -68,8 +67,9 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
 import org.apache.hadoop.thirdparty.com.google.common.io.Files;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Utility functions for testing fsimage storage.
@@ -61,7 +61,6 @@ import java.util.regex.Pattern;
 
 import java.util.function.Supplier;
 import org.apache.commons.logging.impl.Log4JLogger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -119,6 +118,7 @@ import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Sets;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
@@ -128,8 +128,7 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
+import org.slf4j.LoggerFactory;
 
 /**
  * A JUnit test for doing fsck.
@@ -26,6 +26,7 @@ import java.io.File;
 import java.io.IOException;
 import java.util.Collection;
 import java.util.Comparator;
+import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -53,7 +54,6 @@ import org.mockito.stubbing.Answer;
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
 
 
 public class TestNNStorageRetentionManager {
@@ -306,7 +306,7 @@ public class TestNNStorageRetentionManager {
     Mockito.verify(mockPurger, Mockito.atLeast(0))
         .markStale(staleLogsCaptor.capture());
 
-    Set<String> capturedPaths = Sets.newLinkedHashSet();
+    Set<String> capturedPaths = new LinkedHashSet<>();
     // Check images
     for (FSImageFile captured : imagesPurgedCaptor.getAllValues()) {
       capturedPaths.add(fileToPath(captured.getFile()));
@@ -336,9 +336,9 @@
 
   private class TestCaseDescription {
     private final Map<File, FakeRoot> dirRoots = Maps.newLinkedHashMap();
-    private final Set<File> expectedPurgedLogs = Sets.newLinkedHashSet();
-    private final Set<File> expectedPurgedImages = Sets.newLinkedHashSet();
-    private final Set<File> expectedStaleLogs = Sets.newLinkedHashSet();
+    private final Set<File> expectedPurgedLogs = new LinkedHashSet<>();
+    private final Set<File> expectedPurgedImages = new LinkedHashSet<>();
+    private final Set<File> expectedStaleLogs = new LinkedHashSet<>();
 
     private class FakeRoot {
       final NameNodeDirType type;
@@ -33,8 +33,6 @@ import java.util.HashSet;
 import java.util.Set;
 
 import org.apache.commons.io.FileUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -51,13 +49,14 @@ import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
+import org.apache.hadoop.util.Sets;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
 
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
-
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This tests data recovery mode for the NameNode.
@@ -38,7 +38,6 @@ import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -48,6 +47,7 @@ import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Sets;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;