HDFS-9817. Use SLF4J in new classes. Contributed by Anu Engineer
commit 747227e9de (parent 3df0781aa7)
DiskBalancer.java
@@ -20,8 +20,8 @@
 
 import com.google.common.base.Preconditions;
 import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -60,7 +60,8 @@
 @InterfaceAudience.Private
 public class DiskBalancer {
 
-  private static final Log LOG = LogFactory.getLog(DiskBalancer.class);
+  private static final Logger LOG = LoggerFactory.getLogger(DiskBalancer
+      .class);
   private final FsDatasetSpi<?> dataset;
   private final String dataNodeUUID;
   private final BlockMover blockMover;

ConnectorFactory.java
@@ -16,8 +16,8 @@
  */
 package org.apache.hadoop.hdfs.server.diskbalancer.connectors;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 
 import java.io.IOException;
@@ -28,7 +28,8 @@
  * Connector factory creates appropriate connector based on the URL.
  */
 public final class ConnectorFactory {
-  static final Log LOG = LogFactory.getLog(ConnectorFactory.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ConnectorFactory.class);
 
   /**
    * Constructs an appropriate connector based on the URL.
@@ -37,13 +38,13 @@ public final class ConnectorFactory {
    */
   public static ClusterConnector getCluster(URI clusterURI, Configuration
       conf) throws IOException, URISyntaxException {
-    LOG.info("Cluster URI : " + clusterURI);
-    LOG.info("scheme : " + clusterURI.getScheme());
+    LOG.debug("Cluster URI : {}" , clusterURI);
+    LOG.debug("scheme : {}" , clusterURI.getScheme());
     if (clusterURI.getScheme().startsWith("file")) {
-      LOG.info("Creating a JsonNodeConnector");
+      LOG.debug("Creating a JsonNodeConnector");
       return new JsonNodeConnector(clusterURI.toURL());
     } else {
-      LOG.info("Creating NameNode connector");
+      LOG.debug("Creating NameNode connector");
       return new DBNameNodeConnector(clusterURI, conf);
     }
   }

DBNameNodeConnector.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs.server.diskbalancer.connectors;
 
 import com.google.common.base.Preconditions;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -42,7 +42,8 @@
  * given cluster.
  */
 class DBNameNodeConnector implements ClusterConnector {
-  static final Log LOG = LogFactory.getLog(DBNameNodeConnector.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DBNameNodeConnector.class);
   static final Path DISKBALANCER_ID_PATH = new Path("/system/diskbalancer.id");
   private final URI clusterURI;
   private final NameNodeConnector connector;

JsonNodeConnector.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs.server.diskbalancer.connectors;
 
 import com.google.common.base.Preconditions;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel
     .DiskBalancerDataNode;
@@ -33,7 +33,8 @@
 * A connector that understands JSON data cluster models.
 */
 public class JsonNodeConnector implements ClusterConnector {
-  static final Log LOG = LogFactory.getLog(JsonNodeConnector.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(JsonNodeConnector.class);
   private final URL clusterURI;
 
   /**

DiskBalancerCluster.java
@@ -19,8 +19,8 @@
 
 import com.google.common.base.Preconditions;
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.Planner;
@@ -66,7 +66,8 @@
 @JsonIgnoreProperties(ignoreUnknown = true)
 public class DiskBalancerCluster {
 
-  static final Log LOG = LogFactory.getLog(DiskBalancerCluster.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DiskBalancerCluster.class);
   private final Set<String> exclusionList;
   private final Set<String> inclusionList;
   private ClusterConnector clusterConnector;
@@ -264,7 +265,7 @@ public void createSnapshot(String snapShotName) throws IOException {
   */
  public void createOutPutDirectory() throws IOException {
    if (Files.exists(Paths.get(this.getOutput()))) {
-      LOG.fatal("An output directory already exists at this location. Path : " +
+      LOG.error("An output directory already exists at this location. Path : " +
          this.getOutput());
      throw new IOException(
          "An output directory already exists at this location. Path : " +
@@ -273,7 +274,7 @@ public void createOutPutDirectory() throws IOException {
 
    File f = new File(this.getOutput());
    if (!f.mkdirs()) {
-      LOG.fatal("Unable to create the output directory. Path : " + this
+      LOG.error("Unable to create the output directory. Path : " + this
          .getOutput());
      throw new IOException(
          "Unable to create the output directory. Path : " + this.getOutput());

DiskBalancerVolumeSet.java
@@ -19,8 +19,8 @@
 package org.apache.hadoop.hdfs.server.diskbalancer.datamodel;
 
 import com.google.common.base.Preconditions;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.codehaus.jackson.annotate.JsonIgnore;
 import org.codehaus.jackson.annotate.JsonProperty;
 import org.codehaus.jackson.annotate.JsonIgnoreProperties;
@@ -40,7 +40,8 @@
 */
 @JsonIgnoreProperties({"sortedQueue", "volumeCount", "idealUsed"})
 public class DiskBalancerVolumeSet {
-  static final Log LOG = LogFactory.getLog(DiskBalancerVolumeSet.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DiskBalancerVolumeSet.class);
   private final int maxDisks = 256;
 
   @JsonProperty("transient")
@@ -172,7 +173,7 @@ private void skipMisConfiguredVolume(DiskBalancerVolume volume) {
        volume.getStorageType(),
        volume.getUuid());
 
-    LOG.fatal(errMessage);
+    LOG.error(errMessage);
    volume.setSkip(true);
  }
 

GreedyPlanner.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs.server.diskbalancer.planner;
 
 import com.google.common.base.Preconditions;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel
     .DiskBalancerDataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
@@ -42,7 +42,8 @@ public class GreedyPlanner implements Planner {
  public static final long MB = 1024L * 1024L;
  public static final long GB = MB * 1024L;
  public static final long TB = GB * 1024L;
-  static final Log LOG = LogFactory.getLog(GreedyPlanner.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(GreedyPlanner.class);
  private final float threshold;
 
  /**
@@ -108,13 +109,13 @@ public void balanceVolumeSet(DiskBalancerDataNode node,
      if (!lowVolume.isSkip() && !highVolume.isSkip()) {
        nextStep = computeMove(currentSet, lowVolume, highVolume);
      } else {
-        LOG.debug("Skipping compute move. lowVolume :" + lowVolume.getPath());
-        LOG.debug("Skipping compute move. highVolume :" + highVolume.getPath());
+        LOG.debug("Skipping compute move. lowVolume: {} highVolume: {}",
+            lowVolume.getPath(), highVolume.getPath());
      }
 
      applyStep(nextStep, currentSet, lowVolume, highVolume);
      if (nextStep != null) {
-        LOG.debug("Step : " + nextStep.toString());
+        LOG.debug("Step : {} ", nextStep.toString());
        plan.addStep(nextStep);
      }
    }
@@ -179,9 +180,8 @@ private Step computeMove(DiskBalancerVolumeSet currentSet,
    // This disk cannot take any more data from any disk.
    // Remove it from our computation matrix.
    if (maxLowVolumeCanReceive <= 0) {
-      LOG.debug(lowVolume.getPath() +
-          " Skipping disk from computation. Maximum data size " +
-          "achieved.");
+      LOG.debug("{} Skipping disk from computation. Maximum data size " +
+          "achieved.", lowVolume.getPath());
      lowVolume.setSkip(true);
    }
 
@@ -191,9 +191,8 @@ private Step computeMove(DiskBalancerVolumeSet currentSet,
    // This volume cannot give any more data, remove it from the
    // computation matrix
    if (maxHighVolumeCanGive <= 0) {
-      LOG.debug(highVolume.getPath() +
-          " Skipping disk from computation. Minimum data size " +
-          "achieved.");
+      LOG.debug(" {} Skipping disk from computation. Minimum data size " +
+          "achieved.", highVolume.getPath());
      highVolume.setSkip(true);
    }
 

PlannerFactory.java
@@ -16,8 +16,8 @@
  */
 package org.apache.hadoop.hdfs.server.diskbalancer.planner;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel
     .DiskBalancerDataNode;
 
@@ -25,7 +25,8 @@
 * Returns a planner based on the user defined tags.
 */
 public final class PlannerFactory {
-  static final Log LOG = LogFactory.getLog(PlannerFactory.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(PlannerFactory.class);
 
  public static final String GREEDY_PLANNER = "greedyPlanner";
 
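
The same mechanical change repeats in every file above: the commons-logging Log/LogFactory pair is replaced by an SLF4J Logger obtained from LoggerFactory, string-concatenated messages become parameterized {} placeholders, and fatal() (which SLF4J does not provide) is mapped to error(). A minimal, self-contained sketch of that pattern follows; the class name MyService is illustrative and is not part of this commit.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class MyService {

  // Before: private static final Log LOG = LogFactory.getLog(MyService.class);
  private static final Logger LOG = LoggerFactory.getLogger(MyService.class);

  public void start(String dataDir) {
    // Parameterized logging: the message is only formatted if DEBUG is
    // enabled, so there is no string-concatenation cost on the hot path.
    // Before: LOG.debug("Starting service. dataDir : " + dataDir);
    LOG.debug("Starting service. dataDir : {}", dataDir);

    try {
      // ... do work ...
    } catch (RuntimeException e) {
      // SLF4J has no fatal() level; the commit maps LOG.fatal(...) to
      // LOG.error(...). A trailing Throwable argument is logged with its
      // stack trace.
      LOG.error("Service failed to start. dataDir : {}", dataDir, e);
    }
  }
}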