HDDS-2154. Fix Checkstyle issues (#1475)

Elek, Márton 2019-09-19 20:30:33 +02:00 committed by Bharat Viswanadham
parent da1c67e0c2
commit 126ef77a81
11 changed files with 37 additions and 39 deletions

View File

@@ -133,7 +133,8 @@ public final class HddsClientUtils {
*
* @throws IllegalArgumentException
*/
public static void verifyResourceName(String resName) throws IllegalArgumentException {
public static void verifyResourceName(String resName)
throws IllegalArgumentException {
if (resName == null) {
throw new IllegalArgumentException("Bucket or Volume name is null");
}
@@ -141,7 +142,8 @@ public final class HddsClientUtils {
if (resName.length() < OzoneConsts.OZONE_MIN_BUCKET_NAME_LENGTH ||
resName.length() > OzoneConsts.OZONE_MAX_BUCKET_NAME_LENGTH) {
throw new IllegalArgumentException(
"Bucket or Volume length is illegal, valid length is 3-63 characters");
"Bucket or Volume length is illegal, "
+ "valid length is 3-63 characters");
}
if (resName.charAt(0) == '.' || resName.charAt(0) == '-') {
@@ -151,7 +153,8 @@ public final class HddsClientUtils {
if (resName.charAt(resName.length() - 1) == '.' ||
resName.charAt(resName.length() - 1) == '-') {
throw new IllegalArgumentException("Bucket or Volume name cannot end with a period or dash");
throw new IllegalArgumentException("Bucket or Volume name "
+ "cannot end with a period or dash");
}
boolean isIPv4 = true;
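For reference, the rules this method enforces (and which the rewrapped messages describe) can be exercised as below. A minimal sketch, not part of the commit; the sample names and the import path are assumptions:

// Minimal sketch exercising the validation rules shown in the hunks above.
// The import path is assumed from the hadoop-hdds client module.
import org.apache.hadoop.hdds.scm.client.HddsClientUtils;

public class VerifyResourceNameDemo {
  public static void main(String[] args) {
    HddsClientUtils.verifyResourceName("bucket-1");    // OK: 3-63 chars, valid edges
    // Each of the following throws IllegalArgumentException:
    // HddsClientUtils.verifyResourceName("ab");       // shorter than 3 characters
    // HddsClientUtils.verifyResourceName(".bucket");  // starts with a period
    // HddsClientUtils.verifyResourceName("bucket-");  // ends with a dash
  }
}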

View File

@@ -25,7 +25,8 @@ import java.util.NoSuchElementException;
/**
* LevelDB store iterator.
*/
public class LevelDBStoreIterator implements MetaStoreIterator< MetadataStore.KeyValue > {
public class LevelDBStoreIterator
implements MetaStoreIterator<MetadataStore.KeyValue> {
private DBIterator levelDBIterator;

View File

@@ -26,7 +26,8 @@ import java.util.NoSuchElementException;
/**
* RocksDB store iterator.
*/
public class RocksDBStoreIterator implements MetaStoreIterator< MetadataStore.KeyValue > {
public class RocksDBStoreIterator
implements MetaStoreIterator<MetadataStore.KeyValue> {
private RocksIterator rocksDBIterator;
@@ -43,8 +44,9 @@ public class RocksDBStoreIterator implements MetaStoreIterator< MetadataStore.Ke
@Override
public MetadataStore.KeyValue next() {
if (rocksDBIterator.isValid()) {
MetadataStore.KeyValue value = MetadataStore.KeyValue.create(rocksDBIterator.key(), rocksDBIterator
.value());
MetadataStore.KeyValue value =
MetadataStore.KeyValue.create(rocksDBIterator.key(), rocksDBIterator
.value());
rocksDBIterator.next();
return value;
}
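Both store iterators implement the same MetaStoreIterator contract, so callers are unaffected by the rewrapping. A usage sketch, mirroring the TestMetadataStore hunks further down; setDbFile() and the byte[]-based KeyValue accessors are assumed from the surrounding API:

// Sketch: build a store as the test below does, then walk it through
// the MetaStoreIterator contract shown in this commit.
void dumpStore(OzoneConfiguration conf, File dbDir) throws IOException {
  MetadataStore store = MetadataStoreBuilder.newBuilder()
      .setConf(conf)
      .setDbFile(dbDir)     // setDbFile() is assumed from the builder API
      .build();
  MetaStoreIterator<MetadataStore.KeyValue> it = store.iterator();
  while (it.hasNext()) {
    MetadataStore.KeyValue kv = it.next();                // byte[] key/value pair
    System.out.println(DFSUtil.bytes2String(kv.getKey())); // key as a string
  }
  store.close();
}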

View File

@@ -85,7 +85,8 @@ public interface TableCache<CACHEKEY extends CacheKey,
* full cache. It return's {@link CacheResult} with null
* and status as {@link CacheResult.CacheStatus#NOT_EXIST}.
*
* If cache clean up policy is {@link TableCacheImpl.CacheCleanupPolicy#MANUAL} it means
* If cache clean up policy is
* {@link TableCacheImpl.CacheCleanupPolicy#MANUAL} it means
* table cache is partial cache. It return's {@link CacheResult} with
* null and status as MAY_EXIST.
*
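The javadoc above distinguishes a full cache (where a miss is authoritative) from a partial cache under the MANUAL cleanup policy. A hedged sketch of how a caller might branch on the result; lookup(), the EXISTS status, and getCacheValue() are assumptions from the surrounding interface, while NOT_EXIST, MAY_EXIST, and the null-value convention come from the javadoc itself:

// Hedged sketch, not quoted from this diff: interpreting the two cases.
CacheResult<CacheValue<V>> result = cache.lookup(new CacheKey<>(key));
if (result.getCacheStatus() == CacheResult.CacheStatus.EXISTS) {
  return result.getValue().getCacheValue();  // cached value is authoritative
} else if (result.getCacheStatus() == CacheResult.CacheStatus.NOT_EXIST) {
  return null;                               // full cache: key definitely absent
} else {
  return readFromTable(key);                 // MAY_EXIST: placeholder for the DB read
}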

View File

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.utils;
import com.google.common.collect.Lists;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdfs.DFSUtil;
@@ -27,6 +28,7 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
import org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
@@ -68,13 +70,14 @@ public class TestMetadataStore {
public ExpectedException expectedException = ExpectedException.none();
private MetadataStore store;
private File testDir;
public TestMetadataStore(String metadataImpl) {
this.storeImpl = metadataImpl;
}
@Parameters
public static Collection<Object[]> data() {
return Arrays.asList(new Object[][]{
return Arrays.asList(new Object[][] {
{OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB},
{OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB}
});
@@ -121,7 +124,8 @@ public class TestMetadataStore {
//As database is empty, check whether iterator is working as expected or
// not.
MetaStoreIterator< MetadataStore.KeyValue > metaStoreIterator = dbStore.iterator();
MetaStoreIterator<MetadataStore.KeyValue> metaStoreIterator =
dbStore.iterator();
assertFalse(metaStoreIterator.hasNext());
try {
metaStoreIterator.next();
@@ -162,7 +166,6 @@
}
@Test
public void testMetaStoreConfigDifferentFromType() throws IOException {
@@ -199,7 +202,6 @@
GenericTestUtils.LogCapturer logCapturer =
GenericTestUtils.LogCapturer.captureLogs(MetadataStoreBuilder.LOG);
File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName()
+ "-" + storeImpl.toLowerCase() + "-test");
MetadataStore dbStore = MetadataStoreBuilder.newBuilder().setConf(conf)

View File

@@ -448,7 +448,8 @@ public class ContainerStateMachine extends BaseStateMachine {
+ write.getChunkData().getChunkName() + e);
raftFuture.completeExceptionally(e);
throw e;
}}, chunkExecutor);
}
}, chunkExecutor);
writeChunkFutureMap.put(entryIndex, writeChunkFuture);
LOG.debug(gid + ": writeChunk writeStateMachineData : blockId " +
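The reformatted braces above close a lambda handed to chunkExecutor. A generic sketch of that pattern, with all names illustrative rather than the actual ContainerStateMachine code:

// Sketch: run the chunk write on a dedicated executor and surface failures
// through both the async future and the Raft-side future.
CompletableFuture<Message> writeChunkFuture = CompletableFuture.supplyAsync(() -> {
  try {
    return writeChunk(write);              // placeholder for the real write
  } catch (Exception e) {
    raftFuture.completeExceptionally(e);   // fail the Raft-side future too
    throw new CompletionException(e);      // fail writeChunkFuture itself
  }
}, chunkExecutor);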

View File

@@ -145,7 +145,8 @@ public final class OzoneUtils {
*
* @throws IllegalArgumentException
*/
public static void verifyResourceName(String resName) throws IllegalArgumentException {
public static void verifyResourceName(String resName)
throws IllegalArgumentException {
HddsClientUtils.verifyResourceName(resName);
}

View File

@@ -17,28 +17,12 @@
*/
package org.apache.hadoop.fs.ozone;
import java.io.IOException;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.shell.Command;
import org.apache.hadoop.fs.shell.CommandFactory;
import org.apache.hadoop.fs.shell.FsCommand;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.tools.TableListing;
import org.apache.hadoop.tracing.TraceUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** Provide command line access to a Ozone FileSystem. */
@InterfaceAudience.Private
@@ -51,14 +35,19 @@ public class OzoneFsShell extends FsShell {
* {@link #setConf(Configuration)} with a valid configuration prior
* to running commands.
*/
public OzoneFsShell() { this(null); }
public OzoneFsShell() {
this(null);
}
/**
* Construct a OzoneFsShell with the given configuration. Commands can be
* executed via {@link #run(String[])}
* Construct a OzoneFsShell with the given configuration.
*
* Commands can be executed via {@link #run(String[])}
* @param conf the hadoop configuration
*/
public OzoneFsShell(Configuration conf) { super(conf); }
public OzoneFsShell(Configuration conf) {
super(conf);
}
protected void registerCommands(CommandFactory factory) {
// TODO: DFSAdmin subclasses FsShell so need to protect the command
@@ -75,11 +64,12 @@ public class OzoneFsShell extends FsShell {
}
/**
* main() has some simple utility methods
* Main entry point to execute fs commands.
*
* @param argv the command and its arguments
* @throws Exception upon error
*/
public static void main(String argv[]) throws Exception {
public static void main(String[] argv) throws Exception {
OzoneFsShell shell = newShellInstance();
Configuration conf = new Configuration();
conf.setQuietMode(false);
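Since OzoneFsShell extends FsShell, a Hadoop Tool, the cleaned-up main() drives it through ToolRunner. A hedged sketch of invoking it programmatically; the ToolRunner.run() and close() usage follow the stock FsShell pattern (an assumption here), and the "-ls /" arguments are illustrative only:

// Sketch: drive the shell as a Hadoop Tool, as main() above does.
OzoneFsShell shell = new OzoneFsShell(new Configuration());
int res;
try {
  res = ToolRunner.run(shell, new String[] {"-ls", "/"});
} finally {
  shell.close();
}
System.exit(res);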

View File

@@ -22,7 +22,6 @@ import java.util.concurrent.Callable;
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;

View File

@@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.freon;
import java.util.concurrent.Callable;
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import com.amazonaws.auth.EnvironmentVariableCredentialsProvider;
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration;

View File

@@ -22,7 +22,6 @@ import java.util.concurrent.Callable;
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;