HDFS-11969. Block Storage: Convert unnecessary info log levels to debug. Contributed by Mukul Kumar Singh.

Authored by Anu Engineer on 2017-06-14 19:33:59 -07:00; committed by Owen O'Malley
parent 15cc4007b0
commit 8916c70d90
2 changed files with 1 addition and 23 deletions

@@ -402,7 +402,7 @@ public class ContainerCacheFlusher implements Runnable {
     // should be flip instead of rewind, because we also need to make sure
     // the end position is correct.
     blockIDBuffer.flip();
-    LOG.info("Remaining blocks count {} and {}", blockIDBuffer.remaining(),
+    LOG.debug("Remaining blocks count {} and {}", blockIDBuffer.remaining(),
         blockCount);
     while (blockIDBuffer.remaining() >= (Long.SIZE / Byte.SIZE)) {
       long blockID = blockIDBuffer.getLong();
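
For context on the comment this hunk preserves: flip() sets the buffer's limit to the current write position before resetting the position to zero, while rewind() leaves the limit untouched, so a drain loop after rewind() would also read the unwritten tail of the buffer. Below is a minimal standalone sketch of the pattern; the buffer name and drain loop are borrowed from the hunk, everything else is assumed.

    import java.nio.ByteBuffer;

    public class FlipVsRewind {
      public static void main(String[] args) {
        ByteBuffer blockIDBuffer = ByteBuffer.allocate(64); // room for 8 longs
        blockIDBuffer.putLong(42L);
        blockIDBuffer.putLong(43L); // position = 16, limit = 64

        // flip(): limit becomes 16 (the bytes actually written) and position
        // becomes 0, so remaining() counts only the payload.
        blockIDBuffer.flip();
        System.out.println("remaining after flip: " + blockIDBuffer.remaining());

        // drain one block ID (Long.SIZE / Byte.SIZE = 8 bytes) per iteration
        while (blockIDBuffer.remaining() >= (Long.SIZE / Byte.SIZE)) {
          long blockID = blockIDBuffer.getLong();
          System.out.println("read block " + blockID);
        }

        // rewind() instead would have left limit = 64, and the loop would have
        // read six extra zero-valued "block IDs" from the unwritten region.
      }
    }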

@@ -25,7 +25,6 @@ import org.apache.hadoop.hdfs.ozone.protocol.proto
     .ContainerProtos.GetKeyResponseProto;
 import org.apache.hadoop.hdfs.ozone.protocol.proto
     .ContainerProtos.KeyData;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset
     .LengthInputStream;
 import org.apache.hadoop.ksm.helpers.KsmBucketArgs;
@@ -45,7 +44,6 @@ import org.apache.hadoop.ozone.web.request.OzoneQuota;
 import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.scm.ScmConfigKeys;
 import org.apache.hadoop.scm.XceiverClientManager;
-import org.apache.hadoop.scm.protocol.LocatedContainer;
 import org.apache.hadoop.scm.protocolPB
     .StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.ozone.web.exceptions.OzoneException;
@@ -73,7 +71,6 @@ import java.io.IOException;
 import java.io.OutputStream;
 import java.text.SimpleDateFormat;
 import java.util.Date;
-import java.util.Set;
 import java.util.TimeZone;
 import java.util.Locale;
 import java.util.List;
@@ -469,23 +466,4 @@ public final class DistributedStorageHandler implements StorageHandler {
     sdf.setTimeZone(TimeZone.getTimeZone(OzoneConsts.OZONE_TIME_ZONE));
     return sdf.format(date);
   }
-
-  /**
-   * Translates a set of container locations, ordered such that the first is the
-   * leader, into a corresponding {@link Pipeline} object.
-   *
-   * @param locatedContainer container location
-   * @return pipeline corresponding to container locations
-   */
-  private static Pipeline newPipelineFromLocatedContainer(
-      LocatedContainer locatedContainer) {
-    Set<DatanodeInfo> locations = locatedContainer.getLocations();
-    String leaderId = locations.iterator().next().getDatanodeUuid();
-    Pipeline pipeline = new Pipeline(leaderId);
-    for (DatanodeInfo location : locations) {
-      pipeline.addMember(location);
-    }
-    pipeline.setContainerName(locatedContainer.getContainerName());
-    return pipeline;
-  }
 }
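
The change the commit title describes, as a self-contained sketch; the class and helper below are hypothetical and not part of this commit. With SLF4J's parameterized "{}" style used throughout these files, a message downgraded from info to debug costs little when debug is disabled, because the level check happens before any argument formatting.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LogLevelSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(LogLevelSketch.class);

      public static void main(String[] args) {
        int remaining = 128;
        int blockCount = 16;

        // Previously emitted at info on every flush; at debug it is skipped
        // unless the logger for this class is explicitly set to DEBUG.
        LOG.debug("Remaining blocks count {} and {}", remaining, blockCount);

        // For arguments that are expensive to build, guard explicitly:
        if (LOG.isDebugEnabled()) {
          LOG.debug("flush summary: {}", buildSummary(remaining, blockCount));
        }
      }

      // hypothetical helper standing in for costly work
      private static String buildSummary(int remaining, int blockCount) {
        return remaining + " bytes pending for " + blockCount + " blocks";
      }
    }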