HDFS-5323. Remove some deadcode in BlockManager (Colin Patrick McCabe)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1530814 13f79535-47bb-0310-9956-ffa450edef68
parent a184fceddc
commit b8ae4e2a8e
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -306,6 +306,8 @@ Release 2.3.0 - UNRELEASED
     HDFS-4510. Cover classes ClusterJspHelper/NamenodeJspHelper with unit
     tests. (Andrey Klochkov via kihwal)
 
+    HDFS-5323. Remove some deadcode in BlockManager (Colin Patrick McCabe)
+
   OPTIMIZATIONS
 
     HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -75,6 +75,7 @@ import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.LightWeightGSet;
 import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -90,9 +91,6 @@ public class BlockManager {
   static final Log LOG = LogFactory.getLog(BlockManager.class);
   public static final Log blockLog = NameNode.blockStateChangeLog;
 
-  /** Default load factor of map */
-  public static final float DEFAULT_MAP_LOAD_FACTOR = 0.75f;
-
   private static final String QUEUE_REASON_CORRUPT_STATE =
       "it has the wrong state or generation stamp";
 
@@ -244,7 +242,8 @@ public class BlockManager {
     invalidateBlocks = new InvalidateBlocks(datanodeManager);
 
     // Compute the map capacity by allocating 2% of total memory
-    blocksMap = new BlocksMap(DEFAULT_MAP_LOAD_FACTOR);
+    blocksMap = new BlocksMap(
+        LightWeightGSet.computeCapacity(2.0, "BlocksMap"));
     blockplacement = BlockPlacementPolicy.getInstance(
       conf, stats, datanodeManager.getNetworkTopology());
     pendingReplications = new PendingReplicationBlocks(conf.getInt(
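For context on the new call above: LightWeightGSet.computeCapacity(2.0, "BlocksMap") derives the map capacity from a percentage of the JVM heap rather than from an entry count and a load factor, which is why the load-factor constant becomes dead code. The following is a minimal illustrative sketch of that style of computation, not the actual Hadoop implementation; the class name, the 8-byte reference size, and the power-of-two rounding are assumptions here.

    // Hypothetical sketch: size a map as a share of the maximum JVM heap.
    // This approximates what a computeCapacity(percentage, name) helper
    // could do; the real LightWeightGSet may differ in its details.
    public class CapacitySketch {

      // Assumed size of one object reference (64-bit JVM, no compressed oops).
      private static final int REFERENCE_SIZE = 8;

      static int computeCapacity(double percentage, String mapName) {
        long maxMemory = Runtime.getRuntime().maxMemory();
        // How many references fit into the requested share of the heap.
        long references = (long) (maxMemory * percentage / 100.0 / REFERENCE_SIZE);
        // Round down to a power of two so it can back a hash table array.
        int capacity = Integer.highestOneBit((int) Math.min(references, 1L << 30));
        System.out.println(mapName + ": capacity " + capacity
            + " from " + maxMemory + " bytes of heap");
        return capacity;
      }

      public static void main(String[] args) {
        computeCapacity(2.0, "BlocksMap");  // mirrors the call in the hunk above
      }
    }

The design point of the hunk is that the capacity is now computed once by the caller and handed to BlocksMap, so the duplicate sizing logic inside BlocksMap can go away, as the next hunk shows.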
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
@@ -57,11 +57,11 @@ class BlocksMap {
   /** Constant {@link LightWeightGSet} capacity. */
   private final int capacity;
 
-  private volatile GSet<Block, BlockInfo> blocks;
+  private GSet<Block, BlockInfo> blocks;
 
-  BlocksMap(final float loadFactor) {
+  BlocksMap(int capacity) {
     // Use 2% of total memory to size the GSet capacity
-    this.capacity = LightWeightGSet.computeCapacity(2.0, "BlocksMap");
+    this.capacity = capacity;
     this.blocks = new LightWeightGSet<Block, BlockInfo>(capacity);
   }
 
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -192,17 +192,10 @@ public class TestNameNodeMetrics {
     assertCounter("CreateFileOps", 1L, rb);
     assertCounter("FilesCreated", (long)file.depth(), rb);
 
-    // Blocks are stored in a hashmap. Compute its capacity, which
-    // doubles every time the number of entries reach the threshold.
-    int threshold = (int)(blockCapacity * BlockManager.DEFAULT_MAP_LOAD_FACTOR);
-    while (threshold < blockCount) {
-      blockCapacity <<= 1;
-    }
     long filesTotal = file.depth() + 1; // Add 1 for root
     rb = getMetrics(NS_METRICS);
     assertGauge("FilesTotal", filesTotal, rb);
     assertGauge("BlocksTotal", blockCount, rb);
-    assertGauge("BlockCapacity", blockCapacity, rb);
     fs.delete(file, true);
     filesTotal--; // reduce the filecount for deleted file
 
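A side note on the removed test logic, which is my reading rather than anything stated in the commit: threshold is computed once and never updated inside the loop, so the loop could not terminate if it was ever entered; the test only passed because the initial block capacity already exceeded the test's block count. A repaired form of the doubling computation, shown only to illustrate the point (all values below are made up):

    // Hypothetical repaired version of the removed doubling loop. The
    // original never recomputed the threshold, so entering the loop once
    // meant spinning forever. With a fixed GSet capacity the check is
    // obsolete anyway, which is presumably why the commit deletes it.
    public class DoublingSketch {
      public static void main(String[] args) {
        int blockCapacity = 1 << 10;      // example starting capacity
        final int blockCount = 5000;      // example number of blocks
        final float loadFactor = 0.75f;   // the old DEFAULT_MAP_LOAD_FACTOR
        int threshold = (int) (blockCapacity * loadFactor);
        while (threshold < blockCount) {
          blockCapacity <<= 1;
          // This recomputation is the step the removed code lacked.
          threshold = (int) (blockCapacity * loadFactor);
        }
        System.out.println("capacity=" + blockCapacity
            + " threshold=" + threshold);
      }
    }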