HDFS-6920. Archival Storage: check the storage type of delNodeHintStorage when deleting a replica. Contributed by Tsz Wo Nicholas Sze.

Author: Jing Zhao
Date:   2014-08-27 14:08:02 -07:00
parent  8ea20b53a8
commit  b7ded466b0

4 changed files with 57 additions and 9 deletions

BlockManager.java

@@ -2771,12 +2771,9 @@ public class BlockManager {
     final DatanodeStorageInfo addedNodeStorage
         = DatanodeStorageInfo.getDatanodeStorageInfo(nonExcess, addedNode);
     while (nonExcess.size() - replication > 0) {
-      // check if we can delete delNodeHint
       final DatanodeStorageInfo cur;
-      if (firstOne && delNodeHintStorage != null
-          && (moreThanOne.contains(delNodeHintStorage)
-              || (addedNodeStorage != null
-                  && !moreThanOne.contains(addedNodeStorage)))) {
+      if (useDelHint(firstOne, delNodeHintStorage, addedNodeStorage,
+          moreThanOne, excessTypes)) {
         cur = delNodeHintStorage;
       } else { // regular excessive replica removal
         cur = replicator.chooseReplicaToDelete(bc, b, replication,
@@ -2806,6 +2803,27 @@ public class BlockManager {
     }
   }
 
+  /** Check if we can use delHint */
+  static boolean useDelHint(boolean isFirst, DatanodeStorageInfo delHint,
+      DatanodeStorageInfo added, List<DatanodeStorageInfo> moreThan1Racks,
+      List<StorageType> excessTypes) {
+    if (!isFirst) {
+      return false; // only consider delHint for the first case
+    } else if (delHint == null) {
+      return false; // no delHint
+    } else if (!excessTypes.remove(delHint.getStorageType())) {
+      return false; // delHint storage type is not an excess type
+    } else {
+      // check if removing delHint reduces the number of racks
+      if (moreThan1Racks.contains(delHint)) {
+        return true; // delHint and some other nodes are under the same rack
+      } else if (added != null && !moreThan1Racks.contains(added)) {
+        return true; // the added node adds a new rack
+      }
+      return false; // removing delHint would reduce the number of racks
+    }
+  }
+
   private void addToExcessReplicate(DatanodeInfo dn, Block block) {
     assert namesystem.hasWriteLock();
     LightWeightLinkedSet<Block> excessBlocks = excessReplicateMap.get(dn.getDatanodeUuid());
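
To make the helper's contract concrete, here is a minimal standalone sketch of the same decision order, using hypothetical stand-in types (a String for the storage and a two-value enum for the storage type) rather than the real DatanodeStorageInfo. The subtle point it demonstrates is that the storage-type check uses List.remove, so a successful call consumes one entry from excessTypes, which matters when the method is called again with the same list.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Hypothetical stand-ins, only to illustrate the decision order in useDelHint.
enum StorageType { DEFAULT, SSD }

public class UseDelHintSketch {
  /** Mirrors the order of checks in BlockManager.useDelHint. */
  static boolean useDelHint(boolean isFirst, String delHint, StorageType delHintType,
      String added, List<String> moreThan1Racks, List<StorageType> excessTypes) {
    if (!isFirst) {
      return false;                // the hint is only considered on the first pass
    } else if (delHint == null) {
      return false;                // no hint was given
    } else if (!excessTypes.remove(delHintType)) {
      return false;                // the hint's storage type is not an excess type
    } else if (moreThan1Racks.contains(delHint)) {
      return true;                 // the hint shares a rack with another replica
    } else if (added != null && !moreThan1Racks.contains(added)) {
      return true;                 // the added node contributes a new rack
    }
    return false;                  // deleting the hint would lose a rack
  }

  public static void main(String[] args) {
    List<StorageType> excess = new ArrayList<StorageType>(
        Arrays.asList(StorageType.DEFAULT));
    // Hint shares a rack and its type is excess: usable. Consumes DEFAULT.
    System.out.println(useDelHint(true, "dn1", StorageType.DEFAULT,
        null, Arrays.asList("dn1"), excess));   // true
    // An identical call now fails: DEFAULT was removed by the call above.
    System.out.println(useDelHint(true, "dn1", StorageType.DEFAULT,
        null, Arrays.asList("dn1"), excess));   // false
  }
}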

BlockPlacementPolicyDefault.java

@@ -792,8 +792,15 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
         minSpaceStorage = storage;
       }
     }
-    final DatanodeStorageInfo storage = oldestHeartbeatStorage != null?
-        oldestHeartbeatStorage : minSpaceStorage;
+
+    final DatanodeStorageInfo storage;
+    if (oldestHeartbeatStorage != null) {
+      storage = oldestHeartbeatStorage;
+    } else if (minSpaceStorage != null) {
+      storage = minSpaceStorage;
+    } else {
+      return null;
+    }
     excessTypes.remove(storage.getStorageType());
     return storage;
   }
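
Why the ternary had to go: if no replica's storage type matches an excess type, both oldestHeartbeatStorage and minSpaceStorage stay null, the old expression evaluates to null, and the following excessTypes.remove(storage.getStorageType()) throws NullPointerException. A minimal sketch of the before/after shapes, with a hypothetical stand-in Storage class:

// Hypothetical stand-in for DatanodeStorageInfo, to show the shape change only.
public class StorageChoiceSketch {
  static class Storage {
    final String type;
    Storage(String type) { this.type = type; }
  }

  // Old shape: silently yields null when both inputs are null, so the caller's
  // next line, storage.getStorageType(), would throw NullPointerException.
  static Storage chooseOld(Storage oldestHeartbeat, Storage minSpace) {
    return oldestHeartbeat != null ? oldestHeartbeat : minSpace;
  }

  // New shape: the "no deletable replica" case becomes an explicit early
  // return, before any member access can happen.
  static Storage chooseNew(Storage oldestHeartbeat, Storage minSpace) {
    if (oldestHeartbeat != null) {
      return oldestHeartbeat;
    } else if (minSpace != null) {
      return minSpace;
    }
    return null;
  }

  public static void main(String[] args) {
    System.out.println(chooseNew(null, null));                        // null
    System.out.println(chooseNew(null, new Storage("DEFAULT")).type); // DEFAULT
  }
}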

TestBlockManager.java

@@ -38,6 +38,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -46,6 +47,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.net.NetworkTopology;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -599,5 +601,19 @@ public class TestBlockManager {
         new BlockListAsLongs(null, null));
     assertEquals(1, ds.getBlockReportCount());
   }
+
+  @Test
+  public void testUseDelHint() {
+    DatanodeStorageInfo delHint = new DatanodeStorageInfo(
+        DFSTestUtil.getLocalDatanodeDescriptor(), new DatanodeStorage("id"));
+    List<DatanodeStorageInfo> moreThan1Racks = Arrays.asList(delHint);
+    List<StorageType> excessTypes = new ArrayList<StorageType>();
+    excessTypes.add(StorageType.DEFAULT);
+    Assert.assertTrue(BlockManager.useDelHint(true, delHint, null,
+        moreThan1Racks, excessTypes));
+    excessTypes.add(StorageType.SSD);
+    Assert.assertFalse(BlockManager.useDelHint(true, delHint, null,
+        moreThan1Racks, excessTypes));
+  }
 }
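
A note on the second assertion: the arguments are identical to the first call, and it flips to false only because useDelHint mutates excessTypes via remove. The single-argument DatanodeStorage constructor defaults the hint's storage type to DEFAULT, so the first (successful) call consumes DEFAULT, and by the second call only SSD remains. Traced as comments:

// excessTypes over the course of testUseDelHint:
//   add(DEFAULT)              -> [DEFAULT]
//   useDelHint(...) == true   -> []     (remove(DEFAULT) succeeded)
//   add(SSD)                  -> [SSD]
//   useDelHint(...) == false  -> [SSD]  (remove(DEFAULT) finds nothing)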

TestReplicationPolicy.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.mock;
@@ -935,6 +936,12 @@ public class TestReplicationPolicy {
     assertEquals(2, first.size());
     assertEquals(2, second.size());
     List<StorageType> excessTypes = new ArrayList<StorageType>();
+    {
+      // test returning null
+      excessTypes.add(StorageType.SSD);
+      assertNull(replicator.chooseReplicaToDelete(
+          null, null, (short)3, first, second, excessTypes));
+    }
     excessTypes.add(StorageType.DEFAULT);
     DatanodeStorageInfo chosen = replicator.chooseReplicaToDelete(
         null, null, (short)3, first, second, excessTypes);
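
For context on the new block: the mock storages in first and second presumably carry the default storage type, so with SSD as the only excess type no candidate passes the storage-type filter, neither oldestHeartbeatStorage nor minSpaceStorage is ever assigned, and chooseReplicaToDelete takes the new return-null path instead of hitting the NullPointerException the BlockPlacementPolicyDefault change guards against. SSD then remains in excessTypes, so adding DEFAULT afterwards lets the pre-existing assertions run unchanged.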