HDFS-5447. Fix TestJspHelper in branch HDFS-2832.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1538145 13f79535-47bb-0310-9956-ffa450edef68
Author: Arpit Agarwal
Date:   2013-11-02 05:21:48 +00:00
Parent: a891076159
Commit: cbdeba29bd

3 changed files with 25 additions and 27 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt

@@ -57,3 +57,6 @@ IMPROVEMENTS:
 
     HDFS-5437. Fix TestBlockReport and TestBPOfferService failures. (Arpit
     Agarwal)
+
+    HDFS-5447. Fix TestJspHelper. (Arpit Agarwal)
+

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java

@@ -43,9 +43,6 @@ import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.util.IntrusiveCollection;
 import org.apache.hadoop.util.Time;
-
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-
 /**
  * This class extends the DatanodeInfo class with ephemeral information (eg
  * health, capacity, what blocks are associated with the Datanode) that is
@@ -218,26 +215,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
    */
   public DatanodeDescriptor(DatanodeID nodeID,
       String networkLocation) {
-    this(nodeID, networkLocation, 0, 0, 0, 0);
-  }
-
-  /**
-   * DatanodeDescriptor constructor
-   * @param nodeID id of the data node
-   * @param networkLocation location of the data node in network
-   * @param cacheCapacity cache capacity of the data node
-   * @param cacheUsed cache used on the data node
-   * @param xceiverCount # of data transfers at the data node
-   */
-  public DatanodeDescriptor(DatanodeID nodeID,
-      String networkLocation,
-      long cacheCapacity,
-      long cacheUsed,
-      int xceiverCount,
-      int failedVolumes) {
-    super(nodeID, networkLocation);
-    updateHeartbeat(StorageReport.EMPTY_ARRAY, cacheCapacity, cacheUsed,
-        xceiverCount, failedVolumes);
+    super(nodeID, networkLocation);
   }
 
   /**
@@ -638,7 +616,8 @@ public class DatanodeDescriptor extends DatanodeInfo {
     return sb.toString();
   }
 
-  DatanodeStorageInfo updateStorage(DatanodeStorage s) {
+  @VisibleForTesting
+  public DatanodeStorageInfo updateStorage(DatanodeStorage s) {
     synchronized (storageMap) {
       DatanodeStorageInfo storage = storageMap.get(s.getStorageID());
       if (storage == null) {
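
Note: with updateStorage made public and annotated @VisibleForTesting, a test can
attach a storage to a DatanodeDescriptor directly instead of driving it through a
block report. A minimal sketch of the calling pattern this enables (the variable
and storage names below are illustrative, not part of the commit):

    // Attach a named storage to a descriptor under test.
    DatanodeDescriptor dn = new DatanodeDescriptor(dnId, "rack1");
    DatanodeStorageInfo info = dn.updateStorage(new DatanodeStorage("dnStorage1"));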

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java

@@ -25,6 +25,8 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.web.resources.DoAsParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.io.DataInputBuffer;
@@ -451,10 +453,24 @@ public class TestJspHelper {
         1234, 2345, 3456, 4567);
     DatanodeID dnId2 = new DatanodeID("127.0.0.2", "localhost2", "datanode2",
         1235, 2346, 3457, 4568);
-    DatanodeDescriptor dnDesc1 = new DatanodeDescriptor(
-        dnId1, "rack1", 5l, 3l, 10, 2);
-    DatanodeDescriptor dnDesc2 = new DatanodeDescriptor(
-        dnId2, "rack2", 10l, 2l, 20, 1);
+
+    // Setup DatanodeDescriptors with one storage each.
+    DatanodeDescriptor dnDesc1 = new DatanodeDescriptor(dnId1, "rack1");
+    DatanodeDescriptor dnDesc2 = new DatanodeDescriptor(dnId2, "rack2");
+
+    // Update the DatanodeDescriptors with their attached storages.
+    dnDesc1.updateStorage(new DatanodeStorage("dnStorage1"));
+    dnDesc2.updateStorage(new DatanodeStorage("dnStorage2"));
+
+    StorageReport[] report1 = new StorageReport[] {
+        new StorageReport("dnStorage1", false, 1024, 100, 924, 100)
+    };
+    StorageReport[] report2 = new StorageReport[] {
+        new StorageReport("dnStorage2", false, 2500, 200, 1848, 200)
+    };
+    dnDesc1.updateHeartbeat(report1, 5l, 3l, 10, 2);
+    dnDesc2.updateHeartbeat(report2, 10l, 2l, 20, 1);
+
     ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
     live.add(dnDesc1);
     live.add(dnDesc2);
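
Note on the new test setup: the per-node stats that previously went to the removed
six-argument DatanodeDescriptor constructor (cache capacity, cache used, xceiver
count, failed volumes) now go to updateHeartbeat after a storage is attached, while
per-storage stats travel in the StorageReport. A hedged one-node sketch of the same
pattern, with values copied from the test above and the StorageReport argument order
assumed to be (storageID, failed, capacity, dfsUsed, remaining, blockPoolUsed):

    // Construct a descriptor, attach a storage, then heartbeat its stats.
    DatanodeDescriptor dn = new DatanodeDescriptor(dnId1, "rack1");
    dn.updateStorage(new DatanodeStorage("dnStorage1"));
    StorageReport[] reports = new StorageReport[] {
        // A self-consistent report: capacity 1024 = dfsUsed 100 + remaining 924.
        new StorageReport("dnStorage1", false, 1024, 100, 924, 100)
    };
    // Trailing args mirror the removed constructor's parameters:
    // cacheCapacity, cacheUsed, xceiverCount, failedVolumes.
    dn.updateHeartbeat(reports, 5L, 3L, 10, 2);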