HDFS-10518. DiskBalancer: Pretty-print json in Query command. Contributed by Anu Engineer.

This commit is contained in:
Anu Engineer 2016-06-13 14:11:23 -07:00 committed by Arpit Agarwal
parent af11ab34d0
commit 7e2be5c4a0
5 changed files with 116 additions and 6 deletions

View File

@ -34,6 +34,8 @@ import java.io.IOException;
@InterfaceStability.Unstable @InterfaceStability.Unstable
@JsonInclude(JsonInclude.Include.NON_DEFAULT) @JsonInclude(JsonInclude.Include.NON_DEFAULT)
public class DiskBalancerWorkItem { public class DiskBalancerWorkItem {
private long startTime;
private long secondsElapsed;
private long bytesToCopy; private long bytesToCopy;
private long bytesCopied; private long bytesCopied;
private long errorCount; private long errorCount;
@ -242,4 +244,44 @@ public class DiskBalancerWorkItem {
public void setBandwidth(long bandwidth) { public void setBandwidth(long bandwidth) {
this.bandwidth = bandwidth; this.bandwidth = bandwidth;
} }
/**
 * Gets the timestamp at which execution of this work item began.
 *
 * @return start time in milliseconds.
 */
public long getStartTime() {
  return this.startTime;
}
/**
 * Records when execution of this work item began.
 *
 * @param startTimeMillis - start-of-execution timestamp in milliseconds.
 */
public void setStartTime(long startTimeMillis) {
  this.startTime = startTimeMillis;
}
/**
 * Gets the number of seconds that have elapsed since the start time.
 *
 * This value is tracked explicitly because of clock skew: the client's
 * current time may not agree with the server's timestamp, so the elapsed
 * time cannot be derived from startTime alone.
 *
 * @return seconds elapsed since the start time.
 */
public long getSecondsElapsed() {
  return this.secondsElapsed;
}
/**
 * Sets the number of seconds elapsed.
 *
 * Updated alongside the other progress counters.
 *
 * @param elapsedSeconds - elapsed time in seconds.
 */
public void setSecondsElapsed(long elapsedSeconds) {
  this.secondsElapsed = elapsedSeconds;
}
} }

View File

@ -24,6 +24,7 @@ import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.codehaus.jackson.map.ObjectMapper; import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.SerializationConfig;
import static org.codehaus.jackson.map.type.TypeFactory.defaultInstance; import static org.codehaus.jackson.map.type.TypeFactory.defaultInstance;
@ -128,6 +129,7 @@ public class DiskBalancerWorkStatus {
**/ **/
public String currentStateString() throws IOException { public String currentStateString() throws IOException {
ObjectMapper mapper = new ObjectMapper(); ObjectMapper mapper = new ObjectMapper();
mapper.enable(SerializationConfig.Feature.INDENT_OUTPUT);
return mapper.writeValueAsString(currentState); return mapper.writeValueAsString(currentState);
} }

View File

@ -552,6 +552,21 @@ public class DiskBalancer {
* @return FsDatasetSpi * @return FsDatasetSpi
*/ */
FsDatasetSpi getDataset(); FsDatasetSpi getDataset();
/**
 * Returns the time at which this plan started executing.
 *
 * @return Start time in milliseconds.
 */
long getStartTime();
/**
 * Returns the number of seconds this plan has been executing.
 *
 * @return elapsed time in seconds.
 */
long getElapsedSeconds();
} }
/** /**
@ -622,6 +637,8 @@ public class DiskBalancer {
private long maxDiskErrors; private long maxDiskErrors;
private int poolIndex; private int poolIndex;
private AtomicBoolean shouldRun; private AtomicBoolean shouldRun;
private long startTime;
private long secondsElapsed;
/** /**
* Constructs diskBalancerMover. * Constructs diskBalancerMover.
@ -897,6 +914,9 @@ public class DiskBalancer {
FsVolumeSpi source = pair.getSource(); FsVolumeSpi source = pair.getSource();
FsVolumeSpi dest = pair.getDest(); FsVolumeSpi dest = pair.getDest();
List<FsVolumeSpi.BlockIterator> poolIters = new LinkedList<>(); List<FsVolumeSpi.BlockIterator> poolIters = new LinkedList<>();
startTime = Time.now();
item.setStartTime(startTime);
secondsElapsed = 0;
if (source.isTransientStorage() || dest.isTransientStorage()) { if (source.isTransientStorage() || dest.isTransientStorage()) {
return; return;
@ -973,9 +993,6 @@ public class DiskBalancer {
block.getNumBytes(), source.getBasePath(), block.getNumBytes(), source.getBasePath(),
dest.getBasePath()); dest.getBasePath());
item.incCopiedSoFar(block.getNumBytes());
item.incBlocksCopied();
// Check for the max throughput constraint. // Check for the max throughput constraint.
// We sleep here to keep the promise that we will not // We sleep here to keep the promise that we will not
// copy more than Max MB/sec. we sleep enough time // copy more than Max MB/sec. we sleep enough time
@ -984,6 +1001,14 @@ public class DiskBalancer {
// we exit via Thread Interrupted exception. // we exit via Thread Interrupted exception.
Thread.sleep(computeDelay(block.getNumBytes(), timeUsed, item)); Thread.sleep(computeDelay(block.getNumBytes(), timeUsed, item));
// We delay updating the info to avoid confusing the user.
// This way we report the copy only if it is under the
// throughput threshold.
item.incCopiedSoFar(block.getNumBytes());
item.incBlocksCopied();
secondsElapsed = TimeUnit.MILLISECONDS.toSeconds(Time.now() -
startTime);
item.setSecondsElapsed(secondsElapsed);
} catch (IOException ex) { } catch (IOException ex) {
LOG.error("Exception while trying to copy blocks. error: {}", ex); LOG.error("Exception while trying to copy blocks. error: {}", ex);
item.incErrorCount(); item.incErrorCount();
@ -1009,5 +1034,25 @@ public class DiskBalancer {
public FsDatasetSpi getDataset() { public FsDatasetSpi getDataset() {
return dataset; return dataset;
} }
/**
 * {@inheritDoc}
 *
 * @return start time in milliseconds, recorded when the plan began
 * executing.
 */
@Override
public long getStartTime() {
  return this.startTime;
}
/**
 * {@inheritDoc}
 *
 * @return seconds elapsed since plan execution started.
 */
@Override
public long getElapsedSeconds() {
  return this.secondsElapsed;
}
} }
} }

View File

@ -46,6 +46,7 @@ public class QueryCommand extends Command {
" plan running on a given datanode."); " plan running on a given datanode.");
addValidCommandParameters(DiskBalancer.VERBOSE, "Prints verbose results."); addValidCommandParameters(DiskBalancer.VERBOSE, "Prints verbose results.");
} }
/** /**
* Executes the Client Calls. * Executes the Client Calls.
* *

View File

@ -436,6 +436,26 @@ public class TestDiskBalancerWithMockMover {
return this.dataset; return this.dataset;
} }
/**
 * Mock stub — always reports a fixed start time of zero.
 *
 * @return 0, unconditionally.
 */
@Override
public long getStartTime() {
  return 0L;
}
/**
 * Mock stub — always reports zero elapsed time.
 *
 * @return 0, unconditionally.
 */
@Override
public long getElapsedSeconds() {
  return 0L;
}
public int getRunCount() { public int getRunCount() {
synchronized (runCount) { synchronized (runCount) {
LOG.info("Run count : " + runCount.intValue()); LOG.info("Run count : " + runCount.intValue());