svn merge -c 1482658 from trunk for HDFS-4813. Add volatile to BlocksMap.blocks so that the replication thread can see the updated value.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1482659 13f79535-47bb-0310-9956-ffa450edef68
commit 9ec6c9e018
parent 2ed6ce3bfb
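Why the one-word change matters: BlocksMap.blocks is a reference that one thread can reassign (for example when the map is dropped on shutdown) while the ReplicationMonitor thread is still reading it. Without volatile, the Java memory model allows the reading thread to keep using a stale cached reference indefinitely. Below is a minimal, self-contained sketch of that failure pattern and the fix; the class and field names are hypothetical stand-ins, not the HDFS sources.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class VisibilityDemo {
  // Mirrors the role of BlocksMap.blocks: volatile guarantees that a
  // reassignment made by one thread is observed by the long-running reader.
  private volatile Map<Long, String> blocks = new ConcurrentHashMap<>();

  // Stand-in for the replication thread's periodic scan.
  void workerLoop() throws InterruptedException {
    while (!Thread.currentThread().isInterrupted()) {
      Map<Long, String> snapshot = blocks; // re-reads the field on each pass
      System.out.println("entries: " + snapshot.size());
      Thread.sleep(1000);
    }
  }

  // Stand-in for the thread that replaces the map.
  void reset() {
    blocks = new ConcurrentHashMap<>(); // published to the worker by volatile
  }
}

Without the volatile modifier, nothing in workerLoop forces a fresh read of the field, so a reset() performed by another thread may never become visible to it.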
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -255,6 +255,9 @@ Release 2.0.5-beta - UNRELEASED
     HDFS-4300. TransferFsImage.downloadEditsToStorage should use a tmp file for
     destination. (Andrew Wang via atm)
 
+    HDFS-4813. Add volatile to BlocksMap.blocks so that the replication thread
+    can see the updated value. (Jing Zhao via szetszwo)
+
 Release 2.0.4-alpha - 2013-04-25
 
   INCOMPATIBLE CHANGES
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -59,11 +59,10 @@ import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.PendingDataNodeMessages.ReportedBlockInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
+import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
@@ -73,7 +72,6 @@ import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 import org.apache.hadoop.net.Node;
-import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.Time;
 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
@@ -57,7 +57,7 @@ class BlocksMap {
   /** Constant {@link LightWeightGSet} capacity. */
   private final int capacity;
 
-  private GSet<Block, BlockInfo> blocks;
+  private volatile GSet<Block, BlockInfo> blocks;
 
   BlocksMap(final float loadFactor) {
     // Use 2% of total memory to size the GSet capacity
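volatile makes the reassignment visible, but reader code still benefits from copying the reference into a local once per operation, so a concurrent reassignment (even to null) cannot swap the map out from under a sequence of calls. A short self-contained sketch of that read pattern, with hypothetical names mirroring the volatile field in the hunk above:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class SnapshotReadDemo {
  private volatile Map<Long, String> blocks = new ConcurrentHashMap<>();

  // Take one volatile read, then use only the stable local reference.
  String lookup(long blockId) {
    final Map<Long, String> map = blocks;
    return map == null ? null : map.get(blockId);
  }

  // Writer side, e.g. a close() that drops the map.
  void close() {
    blocks = null; // immediately visible to lookup() via volatile
  }
}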