HDFS-8962. Clean up checkstyle warnings in o.a.h.hdfs.DfsClientConf. Contributed by Mingliang Liu.

Committed by Haohui Mai, 2015-08-27 09:02:34 -07:00
parent 574f3a98ce
commit af1e2993c7
10 changed files with 122 additions and 126 deletions

HdfsClientConfigKeys.java

@@ -117,7 +117,7 @@ public interface HdfsClientConfigKeys {
       "dfs.datanode.hdfs-blocks-metadata.enabled";
   boolean DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT = false;

-  static final String REPLICA_ACCESSOR_BUILDER_CLASSES_KEY =
+  String REPLICA_ACCESSOR_BUILDER_CLASSES_KEY =
       PREFIX + "replica.accessor.builder.classes";

   /** dfs.client.retry configuration properties */
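
Note: every field declared in a Java interface is implicitly public, static, and final, so the removed modifiers were redundant; this is what checkstyle's RedundantModifier check flags. A minimal sketch, not from this patch, with invented names:

    // Illustrative only; 'Example' and its keys are invented.
    public interface Example {
      static final String KEY_A = "a"; // flagged: redundant 'static final'
      String KEY_B = "b";              // identical semantics, preferred form
    }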

DfsClientConf.java

@@ -262,7 +262,7 @@ public class DfsClientConf {
       return Collections.emptyList();
     }
     ArrayList<Class<? extends ReplicaAccessorBuilder>> classes =
-        new ArrayList<Class<? extends ReplicaAccessorBuilder>>();
+        new ArrayList<>();
     ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
     for (String className: classNames) {
       try {
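
Note: the diamond operator used above is purely syntactic. Since Java 7 the compiler infers a constructor's type arguments from the declaration, so new ArrayList<>() builds exactly the same list. A small sketch with an invented element type:

    import java.util.ArrayList;
    import java.util.List;

    class DiamondDemo {
      // Both fields hold the same kind of ArrayList; only the spelling differs.
      List<Class<? extends Number>> verbose =
          new ArrayList<Class<? extends Number>>(); // pre-Java-7 form
      List<Class<? extends Number>> concise = new ArrayList<>(); // diamond
    }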
@@ -778,35 +778,33 @@ public class DfsClientConf {
   }

   public String confAsString() {
-    StringBuilder builder = new StringBuilder();
-    builder.append("shortCircuitStreamsCacheSize = ").
-        append(shortCircuitStreamsCacheSize).
-        append(", shortCircuitStreamsCacheExpiryMs = ").
-        append(shortCircuitStreamsCacheExpiryMs).
-        append(", shortCircuitMmapCacheSize = ").
-        append(shortCircuitMmapCacheSize).
-        append(", shortCircuitMmapCacheExpiryMs = ").
-        append(shortCircuitMmapCacheExpiryMs).
-        append(", shortCircuitMmapCacheRetryTimeout = ").
-        append(shortCircuitMmapCacheRetryTimeout).
-        append(", shortCircuitCacheStaleThresholdMs = ").
-        append(shortCircuitCacheStaleThresholdMs).
-        append(", socketCacheCapacity = ").
-        append(socketCacheCapacity).
-        append(", socketCacheExpiry = ").
-        append(socketCacheExpiry).
-        append(", shortCircuitLocalReads = ").
-        append(shortCircuitLocalReads).
-        append(", useLegacyBlockReaderLocal = ").
-        append(useLegacyBlockReaderLocal).
-        append(", domainSocketDataTraffic = ").
-        append(domainSocketDataTraffic).
-        append(", shortCircuitSharedMemoryWatcherInterruptCheckMs = ").
-        append(shortCircuitSharedMemoryWatcherInterruptCheckMs).
-        append(", keyProviderCacheExpiryMs = ").
-        append(keyProviderCacheExpiryMs);
-    return builder.toString();
+    return "shortCircuitStreamsCacheSize = "
+        + shortCircuitStreamsCacheSize
+        + ", shortCircuitStreamsCacheExpiryMs = "
+        + shortCircuitStreamsCacheExpiryMs
+        + ", shortCircuitMmapCacheSize = "
+        + shortCircuitMmapCacheSize
+        + ", shortCircuitMmapCacheExpiryMs = "
+        + shortCircuitMmapCacheExpiryMs
+        + ", shortCircuitMmapCacheRetryTimeout = "
+        + shortCircuitMmapCacheRetryTimeout
+        + ", shortCircuitCacheStaleThresholdMs = "
+        + shortCircuitCacheStaleThresholdMs
+        + ", socketCacheCapacity = "
+        + socketCacheCapacity
+        + ", socketCacheExpiry = "
+        + socketCacheExpiry
+        + ", shortCircuitLocalReads = "
+        + shortCircuitLocalReads
+        + ", useLegacyBlockReaderLocal = "
+        + useLegacyBlockReaderLocal
+        + ", domainSocketDataTraffic = "
+        + domainSocketDataTraffic
+        + ", shortCircuitSharedMemoryWatcherInterruptCheckMs = "
+        + shortCircuitSharedMemoryWatcherInterruptCheckMs
+        + ", keyProviderCacheExpiryMs = "
+        + keyProviderCacheExpiryMs;
   }
 }
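
Note on the confAsString() rewrite above: a single + concatenation expression is compiled into StringBuilder.append() calls by javac (newer JDKs use an invokedynamic-based equivalent), so dropping the hand-written builder changes readability, not behavior. A minimal sketch with invented stand-in fields:

    class ConcatDemo {
      private final int cacheSize = 256;         // invented field
      private final long cacheExpiryMs = 5_000L; // invented field

      // One expression; the compiler supplies the StringBuilder.
      public String confAsString() {
        return "cacheSize = " + cacheSize
            + ", cacheExpiryMs = " + cacheExpiryMs;
      }
    }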

CHANGES.txt

@@ -505,6 +505,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8248. Store INodeId instead of the INodeFile object in
     BlockInfoContiguous. (wheat9)

+    HDFS-8962. Clean up checkstyle warnings in o.a.h.hdfs.DfsClientConf.
+    (Mingliang Liu via wheat9)
+
   OPTIMIZATIONS

     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

BlockReaderFactory.java

@@ -56,7 +56,6 @@ import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplicaInfo;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
-import org.apache.hadoop.io.ByteWritable;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.unix.DomainSocket;
@@ -328,10 +327,8 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
    *             If there was another problem.
    */
   public BlockReader build() throws IOException {
-    BlockReader reader = null;
-
     Preconditions.checkNotNull(configuration);
-    reader = tryToCreateExternalBlockReader();
+    BlockReader reader = tryToCreateExternalBlockReader();
     if (reader != null) {
       return reader;
     }
@@ -432,7 +429,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
           "disableLegacyBlockReaderLocal is set.", this);
       return null;
     }
-    IOException ioe = null;
+    IOException ioe;
     try {
       return BlockReaderLocalLegacy.newBlockReader(conf,
           userGroupInformation, configuration, fileName, block, token,
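
Note: dropping the null initializer from IOException ioe leans on Java's definite-assignment analysis; the compiler rejects any read of a local variable that might not have been assigned, so the initializer was dead code. A minimal sketch:

    class AssignmentDemo {
      static String pick(boolean flag) {
        String result;          // no '= null' needed
        if (flag) {
          result = "external";
        } else {
          result = "legacy";
        }
        return result;          // compiles: assigned on every path
      }
    }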

DataXceiver.java

@@ -703,10 +703,10 @@ class DataXceiver extends Receiver implements Runnable {
           mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
           mirrorSock = datanode.newSocket();
           try {
-            int timeoutValue = dnConf.socketTimeout
-                + (HdfsConstants.READ_TIMEOUT_EXTENSION * targets.length);
+            int timeoutValue = dnConf.socketTimeout +
+                (HdfsConstants.READ_TIMEOUT_EXTENSION * targets.length);
             int writeTimeout = dnConf.socketWriteTimeout +
-                          (HdfsConstants.WRITE_TIMEOUT_EXTENSION * targets.length);
+                (HdfsConstants.WRITE_TIMEOUT_EXTENSION * targets.length);
             NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
             mirrorSock.setSoTimeout(timeoutValue);
             mirrorSock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
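
Note: the reformatted expressions compute timeouts that grow linearly with the number of downstream targets in the write pipeline. A worked example with assumed values (a 60 s base timeout and a 5 s per-target extension are typical HDFS defaults, not taken from this diff):

    class TimeoutDemo {
      static final int READ_TIMEOUT_EXTENSION = 5 * 1000; // assumed ms value

      public static void main(String[] args) {
        int socketTimeout = 60 * 1000; // assumed dnConf.socketTimeout
        int targets = 2;               // two downstream datanodes
        int timeoutValue = socketTimeout + (READ_TIMEOUT_EXTENSION * targets);
        System.out.println(timeoutValue); // 70000 ms
      }
    }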

TestDistributedFileSystem.java

@@ -39,6 +39,7 @@ import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.List;
@@ -97,7 +98,7 @@ import com.google.common.collect.Lists;
 public class TestDistributedFileSystem {
   private static final Random RAN = new Random();

-  {
+  static {
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }
@@ -108,16 +109,17 @@ public class TestDistributedFileSystem {
   private HdfsConfiguration getTestConfiguration() {
     HdfsConfiguration conf;
     if (noXmlDefaults) {
-       conf = new HdfsConfiguration(false);
-       String namenodeDir = new File(MiniDFSCluster.getBaseDirectory(), "name").getAbsolutePath();
-       conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeDir);
-       conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, namenodeDir);
+      conf = new HdfsConfiguration(false);
+      String namenodeDir = new File(MiniDFSCluster.getBaseDirectory(), "name").
+          getAbsolutePath();
+      conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeDir);
+      conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, namenodeDir);
     } else {
-       conf = new HdfsConfiguration();
+      conf = new HdfsConfiguration();
     }
     if (dualPortTesting) {
       conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-              "localhost:0");
+          "localhost:0");
     }

     conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
@@ -133,14 +135,17 @@ public class TestDistributedFileSystem {
       FileSystem fileSys = cluster.getFileSystem();
       fileSys.getDelegationToken("");
     } finally {
-      cluster.shutdown();
+      if (cluster != null) {
+        cluster.shutdown();
+      }
     }
   }

   @Test
   public void testFileSystemCloseAll() throws Exception {
     Configuration conf = getTestConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).
+        build();
     URI address = FileSystem.getDefaultUri(conf);
     try {
@@ -503,7 +508,8 @@ public class TestDistributedFileSystem {
   }

   /** Checks statistics. -1 indicates do not check for the operations */
-  private void checkStatistics(FileSystem fs, int readOps, int writeOps, int largeReadOps) {
+  private void checkStatistics(FileSystem fs, int readOps, int writeOps,
+      int largeReadOps) {
     assertEquals(readOps, DFSTestUtil.getStatistics(fs).getReadOps());
     assertEquals(writeOps, DFSTestUtil.getStatistics(fs).getWriteOps());
     assertEquals(largeReadOps, DFSTestUtil.getStatistics(fs).getLargeReadOps());
@@ -520,7 +526,8 @@ public class TestDistributedFileSystem {
     final Configuration conf = getTestConfiguration();
     conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);

-    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(2).build();
     final FileSystem hdfs = cluster.getFileSystem();

     final String nnAddr = conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
@@ -571,7 +578,8 @@ public class TestDistributedFileSystem {
     final Path dir = new Path("/filechecksum");
     final int block_size = 1024;
-    final int buffer_size = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
+    final int buffer_size = conf.getInt(
+        CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
     conf.setInt(HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);

     //try different number of blocks
@@ -607,14 +615,15 @@ public class TestDistributedFileSystem {
       System.out.println("webhdfsfoocs=" + webhdfsfoocs);

       final Path webhdfsqualified = new Path(webhdfsuri + dir, "foo" + n);
-      final FileChecksum webhdfs_qfoocs = webhdfs.getFileChecksum(webhdfsqualified);
+      final FileChecksum webhdfs_qfoocs =
+          webhdfs.getFileChecksum(webhdfsqualified);
       System.out.println("webhdfs_qfoocs=" + webhdfs_qfoocs);

       //create a zero byte file
       final Path zeroByteFile = new Path(dir, "zeroByteFile" + n);
       {
-        final FSDataOutputStream out = hdfs.create(zeroByteFile, false, buffer_size,
-            (short)2, block_size);
+        final FSDataOutputStream out = hdfs.create(zeroByteFile, false,
+            buffer_size, (short)2, block_size);
         out.close();
       }
@@ -743,9 +752,7 @@ public class TestDistributedFileSystem {
         String[] ids = loc.getStorageIds();
         // Run it through a set to deduplicate, since there should be no dupes
         Set<String> storageIds = new HashSet<>();
-        for (String id: ids) {
-          storageIds.add(id);
-        }
+        Collections.addAll(storageIds, ids);
         assertEquals("Unexpected num storage ids", repl, storageIds.size());
         // Make sure these are all valid storage IDs
         assertTrue("Unknown storage IDs found!", dnStorageIds.containsAll
@@ -1043,9 +1050,7 @@ public class TestDistributedFileSystem {
       output.close();
       assertTrue("File status should be closed", fs.isFileClosed(file));
     } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
+      cluster.shutdown();
     }
   }
@@ -1060,8 +1065,9 @@ public class TestDistributedFileSystem {
     final Path relative = new Path("relative");
     fs.create(new Path(relative, "foo")).close();

-    final List<LocatedFileStatus> retVal = new ArrayList<LocatedFileStatus>();
-    final RemoteIterator<LocatedFileStatus> iter = fs.listFiles(relative, true);
+    final List<LocatedFileStatus> retVal = new ArrayList<>();
+    final RemoteIterator<LocatedFileStatus> iter =
+        fs.listFiles(relative, true);
     while (iter.hasNext()) {
       retVal.add(iter.next());
     }
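
Note: one change in this file is semantic rather than cosmetic. The initializer block { ... } became static { ... }; an instance initializer runs for every constructed object, while a static initializer runs once at class load, which is the right place to set a global log level. A minimal sketch:

    class InitDemo {
      static int staticRuns;
      static int instanceRuns;

      static { staticRuns++; } // executes once, when the class is loaded
      { instanceRuns++; }      // executes on every 'new InitDemo()'

      public static void main(String[] args) {
        new InitDemo();
        new InitDemo();
        System.out.println(staticRuns + " " + instanceRuns); // prints: 1 2
      }
    }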

TestCachingStrategy.java

@@ -113,7 +113,7 @@ public class TestCachingStrategy {
   }

   private static class TestRecordingCacheTracker extends CacheManipulator {
-    private final Map<String, Stats> map = new TreeMap<String, Stats>();
+    private final Map<String, Stats> map = new TreeMap<>();

     @Override
     public void posixFadviseIfPossible(String name,
@@ -365,8 +365,6 @@ public class TestCachingStrategy {
       // read file
       readHdfsFile(fs, new Path(TEST_PATH), Long.MAX_VALUE, false);
       // verify that we dropped everything from the cache.
-      Assert.assertNull(stats);
-
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -389,13 +387,10 @@ public class TestCachingStrategy {
       FileSystem fs = cluster.getFileSystem();
       createHdfsFile(fs, new Path(TEST_PATH), TEST_PATH_LEN, false);
       // verify that we can seek after setDropBehind
-      FSDataInputStream fis = fs.open(new Path(TEST_PATH));
-      try {
+      try (FSDataInputStream fis = fs.open(new Path(TEST_PATH))) {
         Assert.assertTrue(fis.read() != -1); // create BlockReader
         fis.setDropBehind(false); // clear BlockReader
         fis.seek(2); // seek
-      } finally {
-        fis.close();
       }
     } finally {
       if (cluster != null) {
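
Note: the try-with-resources conversion above is behavior-preserving with one improvement: the stream is closed on every exit path, and if both the body and close() throw, the close() failure is recorded as a suppressed exception instead of masking the original. A self-contained sketch:

    import java.io.FileInputStream;
    import java.io.IOException;

    class TwrDemo {
      static int readFirstByte(String path) throws IOException {
        try (FileInputStream in = new FileInputStream(path)) {
          return in.read(); // in.close() runs automatically on exit
        }
      }
    }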

TestShortCircuitCache.java

@@ -71,7 +71,6 @@ import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Test;
 import org.mockito.Mockito;
-import org.mockito.internal.util.reflection.Whitebox;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -269,7 +268,7 @@ public class TestShortCircuitCache {
     }
     // The last two replicas should still be cached.
     for (int i = 1; i < pairs.length; i++) {
-      final Integer iVal = new Integer(i);
+      final Integer iVal = i;
       replicaInfos[i] = cache.fetchOrCreate(
           new ExtendedBlockId(i, "test_bp1"),
           new ShortCircuitReplicaCreator() {
@@ -322,7 +321,7 @@ public class TestShortCircuitCache {
     };
     final long HOUR_IN_MS = 60 * 60 * 1000;
     for (int i = 0; i < pairs.length; i++) {
-      final Integer iVal = new Integer(i);
+      final Integer iVal = i;
       final ExtendedBlockId key = new ExtendedBlockId(i, "test_bp1");
       replicaInfos[i] = cache.fetchOrCreate(key,
           new ShortCircuitReplicaCreator() {
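
Note: new Integer(i) always allocates a fresh object, whereas the autoboxed form compiles to Integer.valueOf(i), which may return a cached instance for small values; the constructor form is deprecated since Java 9. A minimal sketch:

    class BoxingDemo {
      public static void main(String[] args) {
        final Integer boxed = 5;                       // autoboxing
        final Integer viaValueOf = Integer.valueOf(5); // what autoboxing emits
        System.out.println(boxed.equals(viaValueOf));  // true
      }
    }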

TestShortCircuitLocalRead.java

@@ -103,8 +103,8 @@ public class TestShortCircuitLocalRead {
   static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
       throws IOException {
     FSDataOutputStream stm = fileSys.create(name, true,
-                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
-                                            (short)repl, blockSize);
+        fileSys.getConf().getInt("io.file.buffer.size", 4096),
+        (short)repl, blockSize);
     return stm;
   }
@@ -113,13 +113,14 @@ public class TestShortCircuitLocalRead {
     checkData(actual, from, expected, actual.length, message);
   }

-  static private void checkData(byte[] actual, int from, byte[] expected, int len,
-      String message) {
+  static private void checkData(byte[] actual, int from, byte[] expected,
+      int len, String message) {
     for (int idx = 0; idx < len; idx++) {
       if (expected[from + idx] != actual[idx]) {
-        Assert.fail(message + " byte " + (from + idx) + " differs. expected "
-            + expected[from + idx] + " actual " + actual[idx] +
-            "\nexpected: " + StringUtils.byteToHexString(expected, from, from + len) +
+        Assert.fail(message + " byte " + (from + idx) + " differs. expected " +
+            expected[from + idx] + " actual " + actual[idx] +
+            "\nexpected: " +
+            StringUtils.byteToHexString(expected, from, from + len) +
             "\nactual: " + StringUtils.byteToHexString(actual, 0, len));
       }
     }
@@ -272,8 +273,8 @@ public class TestShortCircuitLocalRead {
     try {
       // check that / exists
       Path path = new Path("/");
-      assertTrue("/ should be a directory", fs.getFileStatus(path)
-          .isDirectory() == true);
+      assertTrue("/ should be a directory",
+          fs.getFileStatus(path).isDirectory());

       byte[] fileData = AppendTestUtil.randomBytes(seed, size);
       Path file1 = fs.makeQualified(new Path("filelocal.dat"));
@@ -399,8 +400,8 @@ public class TestShortCircuitLocalRead {
     try {
       // check that / exists
       Path path = new Path("/");
-      assertTrue("/ should be a directory", fs.getFileStatus(path)
-          .isDirectory() == true);
+      assertTrue("/ should be a directory",
+          fs.getFileStatus(path).isDirectory());

       byte[] fileData = AppendTestUtil.randomBytes(seed, size*3);
       // create a new file in home directory. Do not close it.
@@ -421,7 +422,6 @@ public class TestShortCircuitLocalRead {
       instm.seek(skipped);
       nread = instm.read(actual, (int)(skipped + nread), 3);
       instm.close();
-
     } finally {
       fs.close();
       cluster.shutdown();
@@ -470,14 +470,11 @@ public class TestShortCircuitLocalRead {
       File dataFile = cluster.getBlockFile(0, block);
       cluster.shutdown();
       cluster = null;
-      RandomAccessFile raf = null;
-      try {
-        raf = new RandomAccessFile(dataFile, "rw");
+      try (RandomAccessFile raf = new RandomAccessFile(dataFile, "rw")) {
         raf.setLength(0);
-      } finally {
-        if (raf != null) raf.close();
       }
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(false).build();
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(false)
+          .build();
       cluster.waitActive();
       fs = cluster.getFileSystem();
       fsIn = fs.open(TEST_PATH);
@@ -536,7 +533,7 @@ public class TestShortCircuitLocalRead {
     conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
         checksum);

-    //Override fileSize and DATA_TO_WRITE to much larger values for benchmark test
+    // Override fileSize and DATA_TO_WRITE to much larger values for benchmark test
     int fileSize = 1000 * blockSize + 100; // File with 1000 blocks
     final byte [] dataToWrite = AppendTestUtil.randomBytes(seed, fileSize);
@@ -580,8 +577,10 @@ public class TestShortCircuitLocalRead {
   }

   @Test(timeout=60000)
-  public void testReadWithRemoteBlockReader() throws IOException, InterruptedException {
-    doTestShortCircuitReadWithRemoteBlockReader(true, 3*blockSize+100, getCurrentUser(), 0, false);
+  public void testReadWithRemoteBlockReader()
+      throws IOException, InterruptedException {
+    doTestShortCircuitReadWithRemoteBlockReader(true, 3 * blockSize + 100,
+        getCurrentUser(), 0, false);
   }

   /**
@@ -589,8 +588,9 @@ public class TestShortCircuitLocalRead {
    * through RemoteBlockReader
    * @throws IOException
    */
-  public void doTestShortCircuitReadWithRemoteBlockReader(boolean ignoreChecksum, int size, String shortCircuitUser,
-      int readOffset, boolean shortCircuitFails) throws IOException, InterruptedException {
+  public void doTestShortCircuitReadWithRemoteBlockReader(boolean ignoreChecksum,
+      int size, String shortCircuitUser, int readOffset,
+      boolean shortCircuitFails) throws IOException, InterruptedException {
     Configuration conf = new Configuration();
     conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
     conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
@@ -601,8 +601,7 @@ public class TestShortCircuitLocalRead {
       // check that / exists
       Path path = new Path("/");
       URI uri = cluster.getURI();
-      assertTrue("/ should be a directory", fs.getFileStatus(path)
-          .isDirectory() == true);
+      assertTrue("/ should be a directory", fs.getFileStatus(path).isDirectory());

       byte[] fileData = AppendTestUtil.randomBytes(seed, size);
       Path file1 = new Path("filelocal.dat");
@@ -627,7 +626,7 @@ public class TestShortCircuitLocalRead {
   }

   private boolean checkUnsupportedMethod(FileSystem fs, Path file,
-                                         byte[] expected, int readOffset) throws IOException {
+      byte[] expected, int readOffset) throws IOException {
     HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(file);
     ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
     IOUtils.skipFully(stm, readOffset);
@@ -639,5 +638,4 @@ public class TestShortCircuitLocalRead {
     return false;
   }
 }
-
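
Note: a recurring cleanup in this file drops '== true' from assertions. Comparing a boolean to true is a no-op, so both forms below are equivalent; checkstyle's SimplifyBooleanExpression check prefers the shorter one. A minimal sketch:

    import static org.junit.Assert.assertTrue;

    class BooleanAssertDemo {
      static void check(boolean isDirectory) {
        assertTrue(isDirectory == true); // flagged: redundant comparison
        assertTrue(isDirectory);         // equivalent, preferred
      }
    }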