HDFS-11969. Block Storage: Convert unnecessary info log levels to debug. Contributed by Mukul Kumar Singh.

This commit is contained in:
Anu Engineer 2017-06-14 19:33:59 -07:00 committed by Owen O'Malley
parent 15cc4007b0
commit 8916c70d90
2 changed files with 1 addition and 23 deletions

View File

@@ -402,7 +402,7 @@ public class ContainerCacheFlusher implements Runnable {
// should be flip instead of rewind, because we also need to make sure
// the end position is correct.
blockIDBuffer.flip();
-LOG.info("Remaining blocks count {} and {}", blockIDBuffer.remaining(),
+LOG.debug("Remaining blocks count {} and {}", blockIDBuffer.remaining(),
blockCount);
while (blockIDBuffer.remaining() >= (Long.SIZE / Byte.SIZE)) {
long blockID = blockIDBuffer.getLong();

View File

@@ -25,7 +25,6 @@ import org.apache.hadoop.hdfs.ozone.protocol.proto
.ContainerProtos.GetKeyResponseProto;
import org.apache.hadoop.hdfs.ozone.protocol.proto
.ContainerProtos.KeyData;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.datanode.fsdataset
.LengthInputStream;
import org.apache.hadoop.ksm.helpers.KsmBucketArgs;
@@ -45,7 +44,6 @@ import org.apache.hadoop.ozone.web.request.OzoneQuota;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.scm.ScmConfigKeys;
import org.apache.hadoop.scm.XceiverClientManager;
-import org.apache.hadoop.scm.protocol.LocatedContainer;
import org.apache.hadoop.scm.protocolPB
.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.ozone.web.exceptions.OzoneException;
@@ -73,7 +71,6 @@ import java.io.IOException;
import java.io.OutputStream;
import java.text.SimpleDateFormat;
import java.util.Date;
-import java.util.Set;
import java.util.TimeZone;
import java.util.Locale;
import java.util.List;
@@ -469,23 +466,4 @@ public final class DistributedStorageHandler implements StorageHandler {
sdf.setTimeZone(TimeZone.getTimeZone(OzoneConsts.OZONE_TIME_ZONE));
return sdf.format(date);
}
-
-/**
- * Translates a set of container locations, ordered such that the first is the
- * leader, into a corresponding {@link Pipeline} object.
- *
- * @param locatedContainer container location
- * @return pipeline corresponding to container locations
- */
-private static Pipeline newPipelineFromLocatedContainer(
-    LocatedContainer locatedContainer) {
-  Set<DatanodeInfo> locations = locatedContainer.getLocations();
-  String leaderId = locations.iterator().next().getDatanodeUuid();
-  Pipeline pipeline = new Pipeline(leaderId);
-  for (DatanodeInfo location : locations) {
-    pipeline.addMember(location);
-  }
-  pipeline.setContainerName(locatedContainer.getContainerName());
-  return pipeline;
-}
}