HDFS-9527. The return type of FSNamesystem.getBlockCollection should be changed to INodeFile. (szetszwo)

commit c65a796f88 (parent 8dfaa7ded6)
CHANGES.txt

@@ -867,6 +867,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-8831. Trash Support for deletion in HDFS encryption zone. (xyao)
 
+    HDFS-9527. The return type of FSNamesystem.getBlockCollection should be
+    changed to INodeFile. (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
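The heart of the change: FSNamesystem.getBlockCollection now returns INodeFile instead of BlockCollection, so callers stop casting through INode by hand. A minimal before/after sketch, condensed from the hunks below:

    // Before: callers received a BlockCollection and cast it themselves.
    long bcId = storedBlock.getBlockCollectionId();
    INodeFile iFile = ((INode) getBlockCollection(bcId)).asFile();

    // After: the namesystem resolves the inode and returns the file directly,
    // and a BlockInfo overload hides the id lookup.
    public INodeFile getBlockCollection(long id) {
      INode inode = getFSDirectory().getInode(id);
      return inode == null ? null : inode.asFile();
    }
    final INodeFile iFile = getBlockCollection(storedBlock);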
BlockManager.java

@@ -2235,7 +2235,7 @@ public class BlockManager implements BlockStatsMXBean {
       // OpenFileBlocks only inside snapshots also will be added to safemode
       // threshold. So we need to update such blocks to safemode
       // refer HDFS-5283
-      if (namesystem.isInSnapshot(storedBlock)) {
+      if (namesystem.isInSnapshot(storedBlock.getBlockCollectionId())) {
         int numOfReplicas = storedBlock.getUnderConstructionFeature()
             .getNumExpectedLocations();
         bmSafeMode.incrementSafeBlockCount(numOfReplicas);
@@ -3632,7 +3632,7 @@ public class BlockManager implements BlockStatsMXBean {
     return blocksMap.addBlockCollection(block, bc);
   }
 
-  public BlockCollection getBlockCollection(BlockInfo b) {
+  BlockCollection getBlockCollection(BlockInfo b) {
     return namesystem.getBlockCollection(b.getBlockCollectionId());
   }
 
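Note the visibility change: BlockManager.getBlockCollection(BlockInfo) loses its public modifier and becomes package-private. Callers outside the blockmanagement package now go through FSNamesystem instead, as the NamenodeFsck and TestFsck hunks below show, e.g.:

-      bc = bm.getBlockCollection(bi);
+      bc = fsn.getBlockCollection(bi);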
DecommissionManager.java

@@ -560,7 +560,7 @@ public class DecommissionManager {
           continue;
         }
 
-        BlockCollection bc = namesystem.getBlockCollection(bcId);
+        final BlockCollection bc = blockManager.getBlockCollection(block);
         final NumberReplicas num = blockManager.countNodes(block);
         final int liveReplicas = num.liveReplicas();
         final int curReplicas = liveReplicas;
FSNamesystem.java

@@ -3118,11 +3118,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
 
   @Override
-  public boolean isInSnapshot(BlockInfo blockUC) {
+  public boolean isInSnapshot(long blockCollectionID) {
     assert hasReadLock();
-    final BlockCollection bc = blockManager.getBlockCollection(blockUC);
-    if (bc == null || !(bc instanceof INodeFile)
-        || !bc.isUnderConstruction()) {
+    final INodeFile bc = getBlockCollection(blockCollectionID);
+    if (bc == null || !bc.isUnderConstruction()) {
       return false;
     }
 
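Two things change together here: isInSnapshot takes the block-collection id instead of a BlockInfo (which lets the Namesystem interface below drop its BlockInfo import), and the instanceof INodeFile test disappears because getBlockCollection is now statically typed. The resulting check, assembled from the hunk above:

    final INodeFile bc = getBlockCollection(blockCollectionID);
    if (bc == null || !bc.isUnderConstruction()) {
      return false;
    }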
@@ -3149,8 +3148,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return true;
   }
 
+  INodeFile getBlockCollection(BlockInfo b) {
+    return getBlockCollection(b.getBlockCollectionId());
+  }
+
   @Override
-  public BlockCollection getBlockCollection(long id) {
+  public INodeFile getBlockCollection(long id) {
     INode inode = getFSDirectory().getInode(id);
     return inode == null ? null : inode.asFile();
   }
@@ -3211,8 +3214,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
           + " is null, likely because the file owning this block was"
          + " deleted and the block removal is delayed");
     }
-    long bcId = storedBlock.getBlockCollectionId();
-    INodeFile iFile = ((INode)getBlockCollection(bcId)).asFile();
+    final INodeFile iFile = getBlockCollection(storedBlock);
     src = iFile.getFullPathName();
     if (isFileDeleted(iFile)) {
       throw new FileNotFoundException("File not found: "
@@ -3628,8 +3630,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     while (it.hasNext()) {
       Block b = it.next();
       BlockInfo blockInfo = blockManager.getStoredBlock(b);
-      BlockCollection bc = getBlockCollection(
-          blockInfo.getBlockCollectionId());
+      BlockCollection bc = getBlockCollection(blockInfo);
       if (bc.getStoragePolicyID() == lpPolicy.getId()) {
         filesToDelete.add(bc);
       }
@@ -4548,15 +4549,17 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
     // check stored block state
     BlockInfo storedBlock = getStoredBlock(ExtendedBlock.getLocalBlock(block));
-    if (storedBlock == null ||
-        storedBlock.getBlockUCState() != BlockUCState.UNDER_CONSTRUCTION) {
-      throw new IOException(block +
-          " does not exist or is not under Construction" + storedBlock);
+    if (storedBlock == null) {
+      throw new IOException(block + " does not exist.");
+    }
+    if (storedBlock.getBlockUCState() != BlockUCState.UNDER_CONSTRUCTION) {
+      throw new IOException("Unexpected BlockUCState: " + block
+          + " is " + storedBlock.getBlockUCState()
+          + " but not " + BlockUCState.UNDER_CONSTRUCTION);
     }
 
     // check file inode
-    long bcId = storedBlock.getBlockCollectionId();
-    final INodeFile file = ((INode)getBlockCollection(bcId)).asFile();
+    final INodeFile file = getBlockCollection(storedBlock);
     if (file == null || !file.isUnderConstruction() || isFileDeleted(file)) {
       throw new IOException("The file " + storedBlock +
           " belonged to does not exist or it is not under construction.");
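The former single check conflated two failure modes; splitting it gives each its own message, so a missing block and a wrong BlockUCState can be told apart. The pipeline-recovery test below is updated to match; condensed from that hunk:

    } catch (IOException e) {
      // The finalized-block case now reports the actual state instead of the
      // generic "does not exist or is not under Construction" message.
      Assert.assertTrue(e.getMessage().contains(
          "not " + BlockUCState.UNDER_CONSTRUCTION));
    }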
@@ -4820,8 +4823,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
     while (blkIterator.hasNext()) {
       BlockInfo blk = blkIterator.next();
-      BlockCollection bc = getBlockCollection(blk.getBlockCollectionId());
-      final INode inode = (INode)bc;
+      final INodeFile inode = getBlockCollection(blk);
       skip++;
       if (inode != null && blockManager.countNodes(blk).liveReplicas() == 0) {
         String src = FSDirectory.getFullPathName(inode);
NamenodeFsck.java

@@ -40,6 +40,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.BlockReaderFactory;
@@ -47,7 +48,6 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.RemotePeerFactory;
-import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -64,7 +64,6 @@ import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
@@ -254,8 +253,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
       LOG.warn("Block "+ blockId + " " + NONEXISTENT_STATUS);
       return;
     }
-    BlockCollection bc = bm.getBlockCollection(blockInfo);
-    INode iNode = (INode) bc;
+    final INodeFile iNode = namenode.getNamesystem().getBlockCollection(blockInfo);
     NumberReplicas numberReplicas= bm.countNodes(blockInfo);
     out.println("Block Id: " + blockId);
     out.println("Block belongs to: "+iNode.getFullPathName());
Namesystem.java

@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
@@ -49,7 +48,7 @@ public interface Namesystem extends RwLock, SafeMode {
 
   void startSecretManagerIfNecessary();
 
-  boolean isInSnapshot(BlockInfo blockUC);
+  boolean isInSnapshot(long blockCollectionID);
 
   CacheManager getCacheManager();
   HAContext getHAContext();
TestClientProtocolForPipelineRecovery.java

@@ -27,6 +27,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
@@ -62,7 +63,8 @@ public class TestClientProtocolForPipelineRecovery {
         namenode.updateBlockForPipeline(firstBlock, "");
         Assert.fail("Can not get a new GS from a finalized block");
       } catch (IOException e) {
-        Assert.assertTrue(e.getMessage().contains("is not under Construction"));
+        Assert.assertTrue(e.getMessage().contains(
+            "not " + BlockUCState.UNDER_CONSTRUCTION));
       }
 
       // test getNewStampAndToken on a non-existent block
TestBlockManager.java

@@ -51,16 +51,18 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.TestINodeFile;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetworkTopology;
-import org.junit.Assert;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -438,13 +440,12 @@ public class TestBlockManager {
 
   private BlockInfo addBlockOnNodes(long blockId, List<DatanodeDescriptor> nodes) {
     long inodeId = ++mockINodeId;
-    BlockCollection bc = Mockito.mock(BlockCollection.class);
-    Mockito.doReturn(inodeId).when(bc).getId();
-    Mockito.doReturn(bc).when(fsn).getBlockCollection(inodeId);
-    BlockInfo blockInfo = blockOnNodes(blockId, nodes);
+    final INodeFile bc = TestINodeFile.createINodeFile(inodeId);
 
+    BlockInfo blockInfo = blockOnNodes(blockId, nodes);
     blockInfo.setReplication((short) 3);
     blockInfo.setBlockCollectionId(inodeId);
+    Mockito.doReturn(bc).when(fsn).getBlockCollection(inodeId);
     bm.blocksMap.addBlockCollection(blockInfo, bc);
     return blockInfo;
   }
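All the TestBlockManager helpers follow the same pattern: the Mockito mock of BlockCollection is replaced by a real INodeFile from the new TestINodeFile.createINodeFile(long) factory, and the inode id is attached to the block itself. A condensed before/after sketch of the pattern:

    // Before: a stubbed BlockCollection carried the inode id.
    BlockCollection bc = Mockito.mock(BlockCollection.class);
    Mockito.doReturn(inodeId).when(bc).getId();

    // After: a real INodeFile, matching the INodeFile return type that
    // fsn.getBlockCollection(long) now declares, so the stub still compiles.
    final INodeFile bc = TestINodeFile.createINodeFile(inodeId);
    blockInfo.setBlockCollectionId(inodeId);
    Mockito.doReturn(bc).when(fsn).getBlockCollection(inodeId);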
@@ -746,12 +747,11 @@ public class TestBlockManager {
 
   private BlockInfo addBlockToBM(long blkId) {
     Block block = new Block(blkId);
-    BlockInfo blockInfo =
-        new BlockInfoContiguous(block, (short) 3);
-    BlockCollection bc = Mockito.mock(BlockCollection.class);
+    BlockInfo blockInfo = new BlockInfoContiguous(block, (short) 3);
     long inodeId = ++mockINodeId;
-    doReturn(inodeId).when(bc).getId();
+    final INodeFile bc = TestINodeFile.createINodeFile(inodeId);
     bm.blocksMap.addBlockCollection(blockInfo, bc);
+    blockInfo.setBlockCollectionId(inodeId);
     doReturn(bc).when(fsn).getBlockCollection(inodeId);
     return blockInfo;
   }
@@ -760,9 +760,9 @@
     Block block = new Block(blkId);
     BlockInfo blockInfo = new BlockInfoContiguous(block, (short) 3);
     blockInfo.convertToBlockUnderConstruction(UNDER_CONSTRUCTION, null);
-    BlockCollection bc = Mockito.mock(BlockCollection.class);
     long inodeId = ++mockINodeId;
-    doReturn(inodeId).when(bc).getId();
+    final INodeFile bc = TestINodeFile.createINodeFile(inodeId);
+    blockInfo.setBlockCollectionId(inodeId);
     bm.blocksMap.addBlockCollection(blockInfo, bc);
     doReturn(bc).when(fsn).getBlockCollection(inodeId);
     return blockInfo;
TestReplicationPolicy.java

@@ -56,7 +56,10 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import org.apache.hadoop.hdfs.server.namenode.TestINodeFile;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.net.Node;
 import org.apache.log4j.Level;
@@ -1309,7 +1312,7 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
   @Test(timeout = 60000)
   public void testAddStoredBlockDoesNotCauseSkippedReplication()
       throws IOException {
-    Namesystem mockNS = mock(Namesystem.class);
+    FSNamesystem mockNS = mock(FSNamesystem.class);
     when(mockNS.hasWriteLock()).thenReturn(true);
     when(mockNS.hasReadLock()).thenReturn(true);
     BlockManager bm = new BlockManager(mockNS, new HdfsConfiguration());
@@ -1337,10 +1340,11 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
     // queue.
     BlockInfoContiguous info = new BlockInfoContiguous(block1, (short) 1);
     info.convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION, null);
-    BlockCollection bc = mock(BlockCollection.class);
-    when(bc.getId()).thenReturn(1000L);
-    when(mockNS.getBlockCollection(1000L)).thenReturn(bc);
-    bm.addBlockCollection(info, bc);
+    info.setBlockCollectionId(1000L);
+    final INodeFile file = TestINodeFile.createINodeFile(1000L);
+    when(mockNS.getBlockCollection(1000L)).thenReturn(file);
+    bm.addBlockCollection(info, file);
 
     // Adding this block will increase its current replication, and that will
     // remove it from the queue.
TestFsck.java

@@ -826,7 +826,7 @@ public class TestFsck {
     try {
       fsn.writeLock();
       BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
-      bc = bm.getBlockCollection(bi);
+      bc = fsn.getBlockCollection(bi);
     } finally {
       fsn.writeUnlock();
     }
@@ -1394,7 +1394,7 @@ public class TestFsck {
     try {
       fsn.writeLock();
       BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
-      bc = bm.getBlockCollection(bi);
+      bc = fsn.getBlockCollection(bi);
     } finally {
       fsn.writeUnlock();
     }
@@ -1609,7 +1609,7 @@ public class TestFsck {
     try {
       fsn.writeLock();
       BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
-      bc = bm.getBlockCollection(bi);
+      bc = fsn.getBlockCollection(bi);
     } finally {
       fsn.writeUnlock();
     }
TestINodeFile.java

@@ -88,6 +88,11 @@ public class TestINodeFile {
   private short replication;
   private long preferredBlockSize = 1024;
 
+  static public INodeFile createINodeFile(long id) {
+    return new INodeFile(id, ("file" + id).getBytes(), perm, 0L, 0L, null,
+        (short)3, 1024L);
+  }
+
   INodeFile createINodeFile(short replication, long preferredBlockSize) {
     return new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
         null, replication, preferredBlockSize, (byte)0);
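The new static factory gives tests a real, fully typed INodeFile without standing up a namesystem, naming the file after its id ("file" + id). Usage as it appears in the TestReplicationPolicy hunk above:

    info.setBlockCollectionId(1000L);
    final INodeFile file = TestINodeFile.createINodeFile(1000L);
    when(mockNS.getBlockCollection(1000L)).thenReturn(file);
    bm.addBlockCollection(info, file);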