HDFS-9412. getBlocks occupies FSLock and takes too long to complete. Contributed by He Tianyi. Backport HDFS-11855 by Vinitha Reddy Gankidi.

(cherry picked from commit 67523ffcf4)
(cherry picked from commit a6d0e9e56a)
Konstantin V Shvachko 2017-05-21 13:55:28 -07:00
parent 43ea501e33
commit 321f730ca8
3 changed files with 27 additions and 3 deletions

CHANGES.txt

@@ -105,6 +105,9 @@ Release 2.7.4 - UNRELEASED
HDFS-11384. Balancer disperses getBlocks calls to avoid NameNode's rpc queue
saturation. (shv)
HDFS-9412. getBlocks occupies FSLock and takes too long to complete.
Contributed by He Tianyi. Backport HDFS-11855 by Vinitha Reddy Gankidi.
BUG FIXES
HDFS-8307. Spurious DNS Queries from hdfs shell. (Andres Perez via aengineer)

BlockManager.java

@@ -260,6 +260,14 @@ public class BlockManager {
* processed again after acquiring lock again.
*/
private int numBlocksPerIteration;
/**
 * Minimum size a block must have to be sent to the Balancer through
 * getBlocks. After HDFS-8824 small blocks are unused by the Balancer
 * anyway, so there is no point in sending them.
 */
private long getBlocksMinBlockSize = -1;
/**
* Progress of the Replication queues initialisation.
*/
@@ -350,7 +358,10 @@ public class BlockManager {
this.numBlocksPerIteration = conf.getInt(
DFSConfigKeys.DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT,
DFSConfigKeys.DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT_DEFAULT);
this.getBlocksMinBlockSize = conf.getLongBytes(
DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY,
DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_DEFAULT);
LOG.info("defaultReplication = " + defaultReplication);
LOG.info("maxReplication = " + maxReplication);
LOG.info("minReplication = " + minReplication);
@@ -1069,6 +1080,9 @@ public class BlockManager {
while(totalSize<size && iter.hasNext()) {
curBlock = iter.next();
if(!curBlock.isComplete()) continue;
if (curBlock.getNumBytes() < getBlocksMinBlockSize) {
continue;
}
totalSize += addBlock(curBlock, results);
}
if(totalSize<size) {
@@ -1076,6 +1090,9 @@ public class BlockManager {
for(int i=0; i<startBlock&&totalSize<size; i++) {
curBlock = iter.next();
if(!curBlock.isComplete()) continue;
if (curBlock.getNumBytes() < getBlocksMinBlockSize) {
continue;
}
totalSize += addBlock(curBlock, results);
}
}
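Both hunks above apply the same size filter to the two legs of the wrap-around scan: first from the random start offset to the end of the iterator, then from the beginning back to the start offset, stopping once roughly the requested number of bytes has been gathered. A self-contained sketch of that selection logic, using stand-in types and plain size accounting rather than the real BlockManager internals (SimpleBlock and selectBlocks are illustrative, not HDFS APIs):

import java.util.ArrayList;
import java.util.List;

/** Stand-in for a block: only the fields the filter cares about. */
class SimpleBlock {
  final long numBytes;
  final boolean complete;
  SimpleBlock(long numBytes, boolean complete) {
    this.numBytes = numBytes;
    this.complete = complete;
  }
}

public class GetBlocksFilterSketch {
  /**
   * Collect complete blocks of at least minBlockSize, starting at startIndex
   * and wrapping around, until roughly sizeTarget bytes have been gathered.
   */
  static List<SimpleBlock> selectBlocks(List<SimpleBlock> blocks, int startIndex,
      long sizeTarget, long minBlockSize) {
    List<SimpleBlock> results = new ArrayList<>();
    long totalSize = 0;
    // First leg: from the start offset to the end of the list.
    for (int i = startIndex; i < blocks.size() && totalSize < sizeTarget; i++) {
      SimpleBlock b = blocks.get(i);
      if (!b.complete || b.numBytes < minBlockSize) {
        continue; // skip under-construction and too-small blocks
      }
      results.add(b);
      totalSize += b.numBytes;
    }
    // Second leg: wrap around from the beginning up to the start offset.
    for (int i = 0; i < startIndex && totalSize < sizeTarget; i++) {
      SimpleBlock b = blocks.get(i);
      if (!b.complete || b.numBytes < minBlockSize) {
        continue;
      }
      results.add(b);
      totalSize += b.numBytes;
    }
    return results;
  }
}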

TestGetBlocks.java

@@ -188,6 +188,9 @@ public class TestGetBlocks {
final Random r = new Random();
CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
CONF.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY,
DEFAULT_BLOCK_SIZE);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF)
.numDataNodes(REPLICATION_FACTOR)
.storagesPerDatanode(4)
@@ -200,7 +203,7 @@ public class TestGetBlocks {
FSDataOutputStream out = fs.create(new Path("/tmp.txt"),
REPLICATION_FACTOR);
byte[] data = new byte[1024];
long fileLen = 12 * DEFAULT_BLOCK_SIZE;
long fileLen = 12 * DEFAULT_BLOCK_SIZE + 1;
long bytesToWrite = fileLen;
while (bytesToWrite > 0) {
r.nextBytes(data);
@@ -220,7 +223,8 @@ public class TestGetBlocks {
do {
locatedBlocks = dfsclient.getNamenode()
.getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks();
assertEquals(12, locatedBlocks.size());
assertEquals(13, locatedBlocks.size());
notWritten = false;
for (int i = 0; i < 2; i++) {
dataNodes = locatedBlocks.get(i).getLocations();
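The expected count rises from 12 to 13 because the file is now one byte longer than 12 full blocks, which creates a 1-byte trailing block. Since the test also sets the getBlocks minimum block size to DEFAULT_BLOCK_SIZE, that trailing block falls below the threshold and should be omitted when the Balancer asks the NameNode for blocks. A small sketch of the arithmetic, using a stand-in value rather than the test's constant:

long blockSize = 1024L;                    // stand-in for DEFAULT_BLOCK_SIZE
long fileLen = 12 * blockSize + 1;         // one byte past 12 full blocks
long fullBlocks = fileLen / blockSize;     // 12
long totalBlocks = fullBlocks + (fileLen % blockSize > 0 ? 1 : 0); // 13 located blocks
// The 1-byte trailing block is smaller than the configured minimum, so the
// Balancer's getBlocks call is expected to skip it.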