HDDS-2154. Fix Checkstyle issues (#1475)
commit 126ef77a81
parent da1c67e0c2
@@ -133,7 +133,8 @@ public final class HddsClientUtils {
    *
    * @throws IllegalArgumentException
    */
-  public static void verifyResourceName(String resName) throws IllegalArgumentException {
+  public static void verifyResourceName(String resName)
+      throws IllegalArgumentException {
     if (resName == null) {
       throw new IllegalArgumentException("Bucket or Volume name is null");
     }
@@ -141,7 +142,8 @@ public final class HddsClientUtils {
     if (resName.length() < OzoneConsts.OZONE_MIN_BUCKET_NAME_LENGTH ||
         resName.length() > OzoneConsts.OZONE_MAX_BUCKET_NAME_LENGTH) {
       throw new IllegalArgumentException(
-          "Bucket or Volume length is illegal, valid length is 3-63 characters");
+          "Bucket or Volume length is illegal, "
+              + "valid length is 3-63 characters");
     }

     if (resName.charAt(0) == '.' || resName.charAt(0) == '-') {
@@ -151,7 +153,8 @@ public final class HddsClientUtils {

     if (resName.charAt(resName.length() - 1) == '.' ||
         resName.charAt(resName.length() - 1) == '-') {
-      throw new IllegalArgumentException("Bucket or Volume name cannot end with a period or dash");
+      throw new IllegalArgumentException("Bucket or Volume name "
+          + "cannot end with a period or dash");
     }

     boolean isIPv4 = true;
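The three hunks above are pure line-wrapping; the validation behavior is unchanged. A minimal usage sketch of that validation (the import path is an assumption, it is not shown in this diff):

    import org.apache.hadoop.hdds.scm.client.HddsClientUtils; // assumed path

    public final class VerifyNameDemo {
      public static void main(String[] args) {
        HddsClientUtils.verifyResourceName("my-bucket"); // passes: 3-63 chars
        try {
          HddsClientUtils.verifyResourceName("ab");      // too short
        } catch (IllegalArgumentException e) {
          System.out.println("rejected: " + e.getMessage());
        }
      }
    }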
@@ -25,7 +25,8 @@ import java.util.NoSuchElementException;
 /**
  * LevelDB store iterator.
  */
-public class LevelDBStoreIterator implements MetaStoreIterator< MetadataStore.KeyValue > {
+public class LevelDBStoreIterator
+    implements MetaStoreIterator<MetadataStore.KeyValue> {


   private DBIterator levelDBIterator;
@@ -26,7 +26,8 @@ import java.util.NoSuchElementException;
 /**
  * RocksDB store iterator.
  */
-public class RocksDBStoreIterator implements MetaStoreIterator< MetadataStore.KeyValue > {
+public class RocksDBStoreIterator
+    implements MetaStoreIterator<MetadataStore.KeyValue> {

   private RocksIterator rocksDBIterator;

@@ -43,7 +44,8 @@ public class RocksDBStoreIterator implements MetaStoreIterator< MetadataStore.Ke
   @Override
   public MetadataStore.KeyValue next() {
     if (rocksDBIterator.isValid()) {
-      MetadataStore.KeyValue value = MetadataStore.KeyValue.create(rocksDBIterator.key(), rocksDBIterator
+      MetadataStore.KeyValue value =
+          MetadataStore.KeyValue.create(rocksDBIterator.key(), rocksDBIterator
           .value());
       rocksDBIterator.next();
       return value;
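For reference, a sketch of how this iterator is consumed; getKey()/getValue() are assumed accessors on MetadataStore.KeyValue, which the diff shows is built from raw key/value bytes:

    // Drain the iterator over a MetadataStore's raw byte[] entries.
    MetaStoreIterator<MetadataStore.KeyValue> it = dbStore.iterator();
    while (it.hasNext()) {
      MetadataStore.KeyValue kv = it.next();
      System.out.println(new String(kv.getKey(), StandardCharsets.UTF_8)
          + " -> " + kv.getValue().length + " bytes");
    }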
@@ -85,7 +85,8 @@ public interface TableCache<CACHEKEY extends CacheKey,
    * full cache. It return's {@link CacheResult} with null
    * and status as {@link CacheResult.CacheStatus#NOT_EXIST}.
    *
-   * If cache clean up policy is {@link TableCacheImpl.CacheCleanupPolicy#MANUAL} it means
+   * If cache clean up policy is
+   * {@link TableCacheImpl.CacheCleanupPolicy#MANUAL} it means
    * table cache is partial cache. It return's {@link CacheResult} with
    * null and status as MAY_EXIST.
    *
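The reflowed javadoc distinguishes a full cache (a miss is authoritative) from a partial, MANUAL-cleanup cache (a miss may be stale). A sketch of the caller-side contract it describes; the method name lookup and the EXISTS constant are assumptions, NOT_EXIST and MAY_EXIST come from the javadoc itself:

    CacheResult<CACHEVALUE> result = tableCache.lookup(cacheKey);
    if (result.getCacheStatus() == CacheResult.CacheStatus.NOT_EXIST) {
      return null;                      // full cache: miss is authoritative
    } else if (result.getCacheStatus() == CacheResult.CacheStatus.MAY_EXIST) {
      return readFromDbTable(cacheKey); // partial cache: hypothetical DB fallback
    } else {
      return result.getValue();         // served straight from the cache
    }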
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.utils;
 import com.google.common.collect.Lists;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang3.tuple.ImmutablePair;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -27,6 +28,7 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter;
+
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
@@ -68,13 +70,14 @@ public class TestMetadataStore {
   public ExpectedException expectedException = ExpectedException.none();
   private MetadataStore store;
   private File testDir;
+
   public TestMetadataStore(String metadataImpl) {
     this.storeImpl = metadataImpl;
   }

   @Parameters
   public static Collection<Object[]> data() {
-    return Arrays.asList(new Object[][]{
+    return Arrays.asList(new Object[][] {
         {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB},
         {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB}
     });
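data() above feeds the single-argument constructor: the JUnit 4 parameterized runner instantiates the class once per row, so the whole test class runs once against LevelDB and once against RocksDB. A sketch of that wiring (standard JUnit 4; the final modifier on the field is an assumption):

    @RunWith(Parameterized.class)
    public class TestMetadataStore {
      private final String storeImpl;

      public TestMetadataStore(String metadataImpl) {
        this.storeImpl = metadataImpl; // LEVELDB on one pass, ROCKSDB on the other
      }
    }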
@@ -121,7 +124,8 @@ public class TestMetadataStore {

     //As database is empty, check whether iterator is working as expected or
     // not.
-    MetaStoreIterator< MetadataStore.KeyValue > metaStoreIterator = dbStore.iterator();
+    MetaStoreIterator<MetadataStore.KeyValue> metaStoreIterator =
+        dbStore.iterator();
     assertFalse(metaStoreIterator.hasNext());
     try {
       metaStoreIterator.next();
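The try block above expects next() on an exhausted iterator to throw; both iterator files import java.util.NoSuchElementException for exactly this. A sketch of the assertion pattern in the same JUnit 4 style:

    try {
      metaStoreIterator.next();
      fail("next() should throw on an empty store");
    } catch (NoSuchElementException e) {
      // expected: the store is empty, so the iterator is already exhausted
    }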
@@ -162,7 +166,6 @@ public class TestMetadataStore {

   }

-
   @Test
   public void testMetaStoreConfigDifferentFromType() throws IOException {

@@ -199,7 +202,6 @@ public class TestMetadataStore {
     GenericTestUtils.LogCapturer logCapturer =
         GenericTestUtils.LogCapturer.captureLogs(MetadataStoreBuilder.LOG);

-
     File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName()
         + "-" + storeImpl.toLowerCase() + "-test");
     MetadataStore dbStore = MetadataStoreBuilder.newBuilder().setConf(conf)
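For context, a sketch of the builder chain the last line above continues with; setCreateIfMissing and setDbFile are assumed setters on MetadataStoreBuilder, not shown in this diff:

    Configuration conf = new OzoneConfiguration();
    File dbDir = GenericTestUtils.getTestDir("metadata-store-demo");
    MetadataStore dbStore = MetadataStoreBuilder.newBuilder()
        .setConf(conf)            // shown in the diff
        .setCreateIfMissing(true) // assumed setter
        .setDbFile(dbDir)         // assumed setter
        .build();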
@@ -448,7 +448,8 @@ public class ContainerStateMachine extends BaseStateMachine {
             + write.getChunkData().getChunkName() + e);
         raftFuture.completeExceptionally(e);
         throw e;
-      }}, chunkExecutor);
+      }
+    }, chunkExecutor);

     writeChunkFutureMap.put(entryIndex, writeChunkFuture);
     LOG.debug(gid + ": writeChunk writeStateMachineData : blockId " +
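The brace fix above only reformats the tail of an async lambda. A self-contained sketch of the pattern being closed there, with names simplified; this is not the actual ContainerStateMachine code:

    ExecutorService chunkExecutor = Executors.newSingleThreadExecutor();
    CompletableFuture<String> raftFuture = new CompletableFuture<>();
    CompletableFuture<String> writeChunkFuture =
        CompletableFuture.supplyAsync(() -> {
          try {
            return "chunk-written"; // stands in for the real chunk write
          } catch (RuntimeException e) {
            raftFuture.completeExceptionally(e); // mirror failure to the raft-side future
            throw e;
          }
        }, chunkExecutor); // chunk work runs on its dedicated executor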
@@ -145,7 +145,8 @@ public final class OzoneUtils {
    *
    * @throws IllegalArgumentException
    */
-  public static void verifyResourceName(String resName) throws IllegalArgumentException {
+  public static void verifyResourceName(String resName)
+      throws IllegalArgumentException {
     HddsClientUtils.verifyResourceName(resName);
   }

@@ -17,28 +17,12 @@
  */
 package org.apache.hadoop.fs.ozone;

-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.LinkedList;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FsShell;
-import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFactory;
 import org.apache.hadoop.fs.shell.FsCommand;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.tools.TableListing;
-import org.apache.hadoop.tracing.TraceUtils;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.htrace.core.TraceScope;
-import org.apache.htrace.core.Tracer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

 /** Provide command line access to a Ozone FileSystem. */
 @InterfaceAudience.Private
@@ -51,14 +35,19 @@ public class OzoneFsShell extends FsShell {
    * {@link #setConf(Configuration)} with a valid configuration prior
    * to running commands.
    */
-  public OzoneFsShell() { this(null); }
+  public OzoneFsShell() {
+    this(null);
+  }

   /**
-   * Construct a OzoneFsShell with the given configuration. Commands can be
-   * executed via {@link #run(String[])}
+   * Construct a OzoneFsShell with the given configuration.
+   *
+   * Commands can be executed via {@link #run(String[])}
    * @param conf the hadoop configuration
    */
-  public OzoneFsShell(Configuration conf) { super(conf); }
+  public OzoneFsShell(Configuration conf) {
+    super(conf);
+  }

   protected void registerCommands(CommandFactory factory) {
     // TODO: DFSAdmin subclasses FsShell so need to protect the command
@@ -75,11 +64,12 @@ public class OzoneFsShell extends FsShell {
   }

   /**
-   * main() has some simple utility methods
+   * Main entry point to execute fs commands.
+   *
    * @param argv the command and its arguments
    * @throws Exception upon error
    */
-  public static void main(String argv[]) throws Exception {
+  public static void main(String[] argv) throws Exception {
     OzoneFsShell shell = newShellInstance();
     Configuration conf = new Configuration();
     conf.setQuietMode(false);
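main() above follows the standard FsShell bootstrap: build a shell, set a non-quiet Configuration, and dispatch via ToolRunner (imported in the hunk further up). The programmatic equivalent, using only what the diff shows; the o3fs:// URI is illustrative:

    Configuration conf = new Configuration();
    conf.setQuietMode(false);
    OzoneFsShell shell = new OzoneFsShell(conf);
    int res = ToolRunner.run(shell, new String[] {"-ls", "o3fs://bucket.volume/"});
    System.exit(res);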
@@ -22,7 +22,6 @@ import java.util.concurrent.Callable;

 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;

@@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.freon;
 import java.util.concurrent.Callable;

 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;

 import com.amazonaws.auth.EnvironmentVariableCredentialsProvider;
 import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration;
@@ -22,7 +22,6 @@ import java.util.concurrent.Callable;

 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
