HDFS-8482. Rename BlockInfoContiguous to BlockInfo. Contributed by Zhe Zhang.
commit 4928f54733
parent cab7674e54
@@ -580,6 +580,8 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-8377. Support HTTP/2 in datanode. (Duo Zhang via wheat9)
 
+    HDFS-8482. Rename BlockInfoContiguous to BlockInfo. (Zhe Zhang via wang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

@@ -31,7 +31,7 @@ public interface BlockCollection {
   /**
    * Get the last block of the collection.
    */
-  public BlockInfoContiguous getLastBlock();
+  public BlockInfo getLastBlock();
 
   /**
    * Get content summary.
@@ -46,7 +46,7 @@ public interface BlockCollection {
   /**
    * Get the blocks.
    */
-  public BlockInfoContiguous[] getBlocks();
+  public BlockInfo[] getBlocks();
 
   /**
    * Get preferred block size for the collection
@@ -73,13 +73,13 @@ public interface BlockCollection {
   /**
    * Set the block at the given index.
    */
-  public void setBlock(int index, BlockInfoContiguous blk);
+  public void setBlock(int index, BlockInfo blk);
 
   /**
    * Convert the last block of the collection to an under-construction block
    * and set the locations.
    */
-  public BlockInfoContiguousUnderConstruction setLastBlock(BlockInfoContiguous lastBlock,
+  public BlockInfoContiguousUnderConstruction setLastBlock(BlockInfo lastBlock,
       DatanodeStorageInfo[] targets) throws IOException;
 
   /**

@@ -26,13 +26,13 @@ import org.apache.hadoop.util.LightWeightGSet;
 
 /**
  * BlockInfo class maintains for a given block
  * the {@link BlockCollection} it is part of and datanodes where the replicas of
  * the block are stored.
  */
 @InterfaceAudience.Private
-public class BlockInfoContiguous extends Block
+public class BlockInfo extends Block
     implements LightWeightGSet.LinkedElement {
-  public static final BlockInfoContiguous[] EMPTY_ARRAY = {};
+  public static final BlockInfo[] EMPTY_ARRAY = {};
 
   private BlockCollection bc;
 
@@ -45,7 +45,7 @@ public class BlockInfoContiguous extends Block
    * {@link DatanodeStorageInfo} and triplets[3*i+1] and triplets[3*i+2] are
    * references to the previous and the next blocks, respectively, in the list
    * of blocks belonging to this storage.
    *
    * Using previous and next in Object triplets is done instead of a
    * {@link LinkedList} list to efficiently use memory. With LinkedList the cost
    * per replica is 42 bytes (LinkedList#Entry object per replica) versus 16
@@ -57,12 +57,12 @@ public class BlockInfoContiguous extends Block
    * Construct an entry for blocksmap
    * @param replication the block's replication factor
    */
-  public BlockInfoContiguous(short replication) {
+  public BlockInfo(short replication) {
     this.triplets = new Object[3*replication];
     this.bc = null;
   }
 
-  public BlockInfoContiguous(Block blk, short replication) {
+  public BlockInfo(Block blk, short replication) {
     super(blk);
     this.triplets = new Object[3*replication];
     this.bc = null;
@@ -73,7 +73,7 @@ public class BlockInfoContiguous extends Block
    * This is used to convert BlockInfoUnderConstruction
    * @param from BlockInfo to copy from.
    */
-  protected BlockInfoContiguous(BlockInfoContiguous from) {
+  protected BlockInfo(BlockInfo from) {
     super(from);
     this.triplets = new Object[from.triplets.length];
     this.bc = from.bc;
@@ -102,22 +102,22 @@ public class BlockInfoContiguous extends Block
     return (DatanodeStorageInfo)triplets[index*3];
   }
 
-  private BlockInfoContiguous getPrevious(int index) {
+  private BlockInfo getPrevious(int index) {
     assert this.triplets != null : "BlockInfo is not initialized";
     assert index >= 0 && index*3+1 < triplets.length : "Index is out of bound";
-    BlockInfoContiguous info = (BlockInfoContiguous)triplets[index*3+1];
+    BlockInfo info = (BlockInfo)triplets[index*3+1];
     assert info == null ||
-        info.getClass().getName().startsWith(BlockInfoContiguous.class.getName()) :
+        info.getClass().getName().startsWith(BlockInfo.class.getName()) :
         "BlockInfo is expected at " + index*3;
     return info;
   }
 
-  BlockInfoContiguous getNext(int index) {
+  BlockInfo getNext(int index) {
     assert this.triplets != null : "BlockInfo is not initialized";
     assert index >= 0 && index*3+2 < triplets.length : "Index is out of bound";
-    BlockInfoContiguous info = (BlockInfoContiguous)triplets[index*3+2];
+    BlockInfo info = (BlockInfo)triplets[index*3+2];
     assert info == null || info.getClass().getName().startsWith(
-        BlockInfoContiguous.class.getName()) :
+        BlockInfo.class.getName()) :
         "BlockInfo is expected at " + index*3;
     return info;
   }
@@ -136,10 +136,10 @@ public class BlockInfoContiguous extends Block
    * @param to - block to be set to previous on the list of blocks
    * @return current previous block on the list of blocks
    */
-  private BlockInfoContiguous setPrevious(int index, BlockInfoContiguous to) {
+  private BlockInfo setPrevious(int index, BlockInfo to) {
     assert this.triplets != null : "BlockInfo is not initialized";
     assert index >= 0 && index*3+1 < triplets.length : "Index is out of bound";
-    BlockInfoContiguous info = (BlockInfoContiguous)triplets[index*3+1];
+    BlockInfo info = (BlockInfo)triplets[index*3+1];
     triplets[index*3+1] = to;
     return info;
   }
@@ -152,10 +152,10 @@ public class BlockInfoContiguous extends Block
    * @param to - block to be set to next on the list of blocks
    * * @return current next block on the list of blocks
    */
-  private BlockInfoContiguous setNext(int index, BlockInfoContiguous to) {
+  private BlockInfo setNext(int index, BlockInfo to) {
     assert this.triplets != null : "BlockInfo is not initialized";
     assert index >= 0 && index*3+2 < triplets.length : "Index is out of bound";
-    BlockInfoContiguous info = (BlockInfoContiguous)triplets[index*3+2];
+    BlockInfo info = (BlockInfo)triplets[index*3+2];
     triplets[index*3+2] = to;
     return info;
   }
@@ -175,7 +175,7 @@ public class BlockInfoContiguous extends Block
     int last = numNodes();
     if(triplets.length >= (last+num)*3)
       return last;
     /* Not enough space left. Create a new array. Should normally
      * happen only when replication is manually increased by the user. */
     Object[] old = triplets;
     triplets = new Object[(last+num)*3];
@@ -215,18 +215,18 @@ public class BlockInfoContiguous extends Block
     int dnIndex = findStorageInfo(storage);
     if(dnIndex < 0) // the node is not found
       return false;
     assert getPrevious(dnIndex) == null && getNext(dnIndex) == null :
       "Block is still in the list and must be removed first.";
     // find the last not null node
     int lastNode = numNodes()-1;
     // replace current node triplet by the lastNode one
     setStorageInfo(dnIndex, getStorageInfo(lastNode));
     setNext(dnIndex, getNext(lastNode));
     setPrevious(dnIndex, getPrevious(lastNode));
     // set the last triplet to null
     setStorageInfo(lastNode, null);
     setNext(lastNode, null);
     setPrevious(lastNode, null);
     return true;
   }
 
@@ -245,7 +245,7 @@ public class BlockInfoContiguous extends Block
     }
     return null;
   }
 
   /**
    * Find specified DatanodeStorageInfo.
    * @return index or -1 if not found.
@@ -265,16 +265,16 @@ public class BlockInfoContiguous extends Block
   }
 
   /**
    * Insert this block into the head of the list of blocks
    * related to the specified DatanodeStorageInfo.
    * If the head is null then form a new list.
    * @return current block as the new head of the list.
    */
-  BlockInfoContiguous listInsert(BlockInfoContiguous head,
+  BlockInfo listInsert(BlockInfo head,
       DatanodeStorageInfo storage) {
     int dnIndex = this.findStorageInfo(storage);
     assert dnIndex >= 0 : "Data node is not found: current";
     assert getPrevious(dnIndex) == null && getNext(dnIndex) == null :
       "Block is already in the list and cannot be inserted.";
     this.setPrevious(dnIndex, null);
     this.setNext(dnIndex, head);
@@ -284,14 +284,14 @@ public class BlockInfoContiguous extends Block
   }
 
   /**
    * Remove this block from the list of blocks
    * related to the specified DatanodeStorageInfo.
    * If this block is the head of the list then return the next block as
    * the new head.
    * @return the new head of the list or null if the list becomes
    * empy after deletion.
    */
-  BlockInfoContiguous listRemove(BlockInfoContiguous head,
+  BlockInfo listRemove(BlockInfo head,
       DatanodeStorageInfo storage) {
     if(head == null)
       return null;
@@ -299,8 +299,8 @@ public class BlockInfoContiguous extends Block
     if(dnIndex < 0) // this block is not on the data-node list
       return head;
 
-    BlockInfoContiguous next = this.getNext(dnIndex);
-    BlockInfoContiguous prev = this.getPrevious(dnIndex);
+    BlockInfo next = this.getNext(dnIndex);
+    BlockInfo prev = this.getPrevious(dnIndex);
     this.setNext(dnIndex, null);
     this.setPrevious(dnIndex, null);
     if(prev != null)
@@ -318,13 +318,13 @@ public class BlockInfoContiguous extends Block
    *
    * @return the new head of the list.
    */
-  public BlockInfoContiguous moveBlockToHead(BlockInfoContiguous head,
+  public BlockInfo moveBlockToHead(BlockInfo head,
       DatanodeStorageInfo storage, int curIndex, int headIndex) {
     if (head == this) {
       return this;
     }
-    BlockInfoContiguous next = this.setNext(curIndex, head);
-    BlockInfoContiguous prev = this.setPrevious(curIndex, null);
+    BlockInfo next = this.setNext(curIndex, head);
+    BlockInfo prev = this.setPrevious(curIndex, null);
 
     head.setPrevious(headIndex, this);
     prev.setNext(prev.findStorageInfo(storage), next);
@@ -346,7 +346,7 @@ public class BlockInfoContiguous extends Block
 
   /**
    * Is this block complete?
    *
    * @return true if the state of the block is {@link BlockUCState#COMPLETE}
    */
   public boolean isComplete() {
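
The Javadoc in the renamed class above describes the triplets array: for replica i, slot 3*i holds the storage that has the replica, and slots 3*i+1 and 3*i+2 hold the previous and next blocks in that storage's block list. The following is a minimal, self-contained sketch of that indexing, assuming only what the comments and constructors above state; the class and accessor names are illustrative and are not part of the commit.

// Illustrative sketch of the triplets layout described above; not HDFS code.
class TripletsLayoutSketch {
  // For each replica i the array keeps three slots:
  //   triplets[3*i]     -> storage holding the replica (DatanodeStorageInfo in HDFS)
  //   triplets[3*i + 1] -> previous block in that storage's block list
  //   triplets[3*i + 2] -> next block in that storage's block list
  private final Object[] triplets;

  TripletsLayoutSketch(short replication) {
    // one triplet per expected replica, mirroring the constructors shown above
    this.triplets = new Object[3 * replication];
  }

  Object storage(int i)  { return triplets[3 * i]; }
  Object previous(int i) { return triplets[3 * i + 1]; }
  Object next(int i)     { return triplets[3 * i + 2]; }
}
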
@@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
  * Represents a block that is currently being constructed.<br>
  * This is usually the last block of a file opened for write or append.
  */
-public class BlockInfoContiguousUnderConstruction extends BlockInfoContiguous {
+public class BlockInfoContiguousUnderConstruction extends BlockInfo {
   /** Block state. See {@link BlockUCState} */
   private BlockUCState blockUCState;
 
@@ -182,10 +182,10 @@ public class BlockInfoContiguousUnderConstruction extends BlockInfoContiguous {
    * the client or it does not have at least a minimal number of replicas
    * reported from data-nodes.
    */
-  BlockInfoContiguous convertToCompleteBlock() throws IOException {
+  BlockInfo convertToCompleteBlock() throws IOException {
     assert getBlockUCState() != BlockUCState.COMPLETE :
       "Trying to convert a COMPLETE block";
-    return new BlockInfoContiguous(this);
+    return new BlockInfo(this);
   }
 
   /** Set expected locations */

@@ -543,8 +543,8 @@ public class BlockManager {
       int usableReplicas = numReplicas.liveReplicas() +
           numReplicas.decommissionedAndDecommissioning();
 
-      if (block instanceof BlockInfoContiguous) {
-        BlockCollection bc = ((BlockInfoContiguous) block).getBlockCollection();
+      if (block instanceof BlockInfo) {
+        BlockCollection bc = ((BlockInfo) block).getBlockCollection();
         String fileName = (bc == null) ? "[orphaned]" : bc.getName();
         out.print(fileName + ": ");
       }
@@ -624,7 +624,7 @@ public class BlockManager {
       Block commitBlock) throws IOException {
     if(commitBlock == null)
       return false; // not committing, this is a block allocation retry
-    BlockInfoContiguous lastBlock = bc.getLastBlock();
+    BlockInfo lastBlock = bc.getLastBlock();
     if(lastBlock == null)
       return false; // no blocks in file yet
     if(lastBlock.isComplete())
@@ -644,11 +644,11 @@ public class BlockManager {
    * @throws IOException if the block does not have at least a minimal number
    * of replicas reported from data-nodes.
    */
-  private BlockInfoContiguous completeBlock(final BlockCollection bc,
+  private BlockInfo completeBlock(final BlockCollection bc,
       final int blkIndex, boolean force) throws IOException {
     if(blkIndex < 0)
       return null;
-    BlockInfoContiguous curBlock = bc.getBlocks()[blkIndex];
+    BlockInfo curBlock = bc.getBlocks()[blkIndex];
     if(curBlock.isComplete())
       return curBlock;
     BlockInfoContiguousUnderConstruction ucBlock =
@@ -660,7 +660,7 @@ public class BlockManager {
     if(!force && ucBlock.getBlockUCState() != BlockUCState.COMMITTED)
       throw new IOException(
           "Cannot complete block: block has not been COMMITTED by the client");
-    BlockInfoContiguous completeBlock = ucBlock.convertToCompleteBlock();
+    BlockInfo completeBlock = ucBlock.convertToCompleteBlock();
     // replace penultimate block in file
     bc.setBlock(blkIndex, completeBlock);
 
@@ -678,9 +678,9 @@ public class BlockManager {
     return blocksMap.replaceBlock(completeBlock);
   }
 
-  private BlockInfoContiguous completeBlock(final BlockCollection bc,
-      final BlockInfoContiguous block, boolean force) throws IOException {
-    BlockInfoContiguous[] fileBlocks = bc.getBlocks();
+  private BlockInfo completeBlock(final BlockCollection bc,
+      final BlockInfo block, boolean force) throws IOException {
+    BlockInfo[] fileBlocks = bc.getBlocks();
     for(int idx = 0; idx < fileBlocks.length; idx++)
       if(fileBlocks[idx] == block) {
         return completeBlock(bc, idx, force);
@@ -693,7 +693,7 @@ public class BlockManager {
    * regardless of whether enough replicas are present. This is necessary
    * when tailing edit logs as a Standby.
    */
-  public BlockInfoContiguous forceCompleteBlock(final BlockCollection bc,
+  public BlockInfo forceCompleteBlock(final BlockCollection bc,
       final BlockInfoContiguousUnderConstruction block) throws IOException {
     block.commitBlock(block);
     return completeBlock(bc, block, true);
@@ -716,7 +716,7 @@ public class BlockManager {
    */
   public LocatedBlock convertLastBlockToUnderConstruction(
       BlockCollection bc, long bytesToRemove) throws IOException {
-    BlockInfoContiguous oldBlock = bc.getLastBlock();
+    BlockInfo oldBlock = bc.getLastBlock();
     if(oldBlock == null ||
        bc.getPreferredBlockSize() == oldBlock.getNumBytes() - bytesToRemove)
       return null;
@@ -769,7 +769,7 @@ public class BlockManager {
   }
 
   private List<LocatedBlock> createLocatedBlockList(
-      final BlockInfoContiguous[] blocks,
+      final BlockInfo[] blocks,
       final long offset, final long length, final int nrBlocksToReturn,
       final AccessMode mode) throws IOException {
     int curBlk = 0;
@@ -799,7 +799,7 @@ public class BlockManager {
     return results;
   }
 
-  private LocatedBlock createLocatedBlock(final BlockInfoContiguous[] blocks,
+  private LocatedBlock createLocatedBlock(final BlockInfo[] blocks,
       final long endPos, final AccessMode mode) throws IOException {
     int curBlk = 0;
     long curPos = 0;
@@ -815,7 +815,7 @@ public class BlockManager {
     return createLocatedBlock(blocks[curBlk], curPos, mode);
   }
 
-  private LocatedBlock createLocatedBlock(final BlockInfoContiguous blk, final long pos,
+  private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos,
       final AccessMode mode) throws IOException {
     final LocatedBlock lb = createLocatedBlock(blk, pos);
     if (mode != null) {
@@ -825,7 +825,7 @@ public class BlockManager {
   }
 
   /** @return a LocatedBlock for the given block */
-  private LocatedBlock createLocatedBlock(final BlockInfoContiguous blk, final long pos
+  private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos
       ) throws IOException {
     if (blk instanceof BlockInfoContiguousUnderConstruction) {
       if (blk.isComplete()) {
@@ -874,7 +874,7 @@ public class BlockManager {
   }
 
   /** Create a LocatedBlocks. */
-  public LocatedBlocks createLocatedBlocks(final BlockInfoContiguous[] blocks,
+  public LocatedBlocks createLocatedBlocks(final BlockInfo[] blocks,
       final long fileSizeExcludeBlocksUnderConstruction,
       final boolean isFileUnderConstruction, final long offset,
       final long length, final boolean needBlockToken,
@@ -897,7 +897,7 @@ public class BlockManager {
       final LocatedBlock lastlb;
       final boolean isComplete;
       if (!inSnapshot) {
-        final BlockInfoContiguous last = blocks[blocks.length - 1];
+        final BlockInfo last = blocks[blocks.length - 1];
         final long lastPos = last.isComplete()?
             fileSizeExcludeBlocksUnderConstruction - last.getNumBytes()
             : fileSizeExcludeBlocksUnderConstruction;
@@ -986,7 +986,7 @@ public class BlockManager {
   /**
    * Check if a block is replicated to at least the minimum replication.
    */
-  public boolean isSufficientlyReplicated(BlockInfoContiguous b) {
+  public boolean isSufficientlyReplicated(BlockInfo b) {
     // Compare against the lesser of the minReplication and number of live DNs.
     final int replication =
         Math.min(minReplication, getDatanodeManager().getNumLiveDataNodes());
@@ -1027,7 +1027,7 @@ public class BlockManager {
     if(numBlocks == 0) {
       return new BlocksWithLocations(new BlockWithLocations[0]);
     }
-    Iterator<BlockInfoContiguous> iter = node.getBlockIterator();
+    Iterator<BlockInfo> iter = node.getBlockIterator();
     // starting from a random block
     int startBlock = ThreadLocalRandom.current().nextInt(numBlocks);
     // skip blocks
@@ -1036,7 +1036,7 @@ public class BlockManager {
     }
     List<BlockWithLocations> results = new ArrayList<BlockWithLocations>();
     long totalSize = 0;
-    BlockInfoContiguous curBlock;
+    BlockInfo curBlock;
     while(totalSize<size && iter.hasNext()) {
       curBlock = iter.next();
       if(!curBlock.isComplete()) continue;
@@ -1135,7 +1135,7 @@ public class BlockManager {
   public void findAndMarkBlockAsCorrupt(final ExtendedBlock blk,
       final DatanodeInfo dn, String storageID, String reason) throws IOException {
     assert namesystem.hasWriteLock();
-    final BlockInfoContiguous storedBlock = getStoredBlock(blk.getLocalBlock());
+    final BlockInfo storedBlock = getStoredBlock(blk.getLocalBlock());
     if (storedBlock == null) {
       // Check if the replica is in the blockMap, if not
       // ignore the request for now. This could happen when BlockScanner
@@ -1694,7 +1694,7 @@ public class BlockManager {
          * Use the blockinfo from the blocksmap to be certain we're working
          * with the most up-to-date block information (e.g. genstamp).
          */
-        BlockInfoContiguous bi = blocksMap.getStoredBlock(timedOutItems[i]);
+        BlockInfo bi = blocksMap.getStoredBlock(timedOutItems[i]);
         if (bi == null) {
           continue;
         }
@@ -1738,16 +1738,16 @@ public class BlockManager {
    */
   private static class BlockToMarkCorrupt {
     /** The corrupted block in a datanode. */
-    final BlockInfoContiguous corrupted;
+    final BlockInfo corrupted;
     /** The corresponding block stored in the BlockManager. */
-    final BlockInfoContiguous stored;
+    final BlockInfo stored;
     /** The reason to mark corrupt. */
     final String reason;
     /** The reason code to be stored */
    final Reason reasonCode;
 
-    BlockToMarkCorrupt(BlockInfoContiguous corrupted,
-        BlockInfoContiguous stored, String reason,
+    BlockToMarkCorrupt(BlockInfo corrupted,
+        BlockInfo stored, String reason,
        Reason reasonCode) {
      Preconditions.checkNotNull(corrupted, "corrupted is null");
      Preconditions.checkNotNull(stored, "stored is null");
@@ -1758,14 +1758,14 @@ public class BlockManager {
       this.reasonCode = reasonCode;
     }
 
-    BlockToMarkCorrupt(BlockInfoContiguous stored, String reason,
+    BlockToMarkCorrupt(BlockInfo stored, String reason,
         Reason reasonCode) {
       this(stored, stored, reason, reasonCode);
     }
 
-    BlockToMarkCorrupt(BlockInfoContiguous stored, long gs, String reason,
+    BlockToMarkCorrupt(BlockInfo stored, long gs, String reason,
         Reason reasonCode) {
-      this(new BlockInfoContiguous(stored), stored, reason, reasonCode);
+      this(new BlockInfo(stored), stored, reason, reasonCode);
       //the corrupted block in datanode has a different generation stamp
       corrupted.setGenerationStamp(gs);
     }
@@ -1882,10 +1882,10 @@ public class BlockManager {
         "longer exists on the DataNode.",
         Long.toHexString(context.getReportId()), zombie.getStorageID());
     assert(namesystem.hasWriteLock());
-    Iterator<BlockInfoContiguous> iter = zombie.getBlockIterator();
+    Iterator<BlockInfo> iter = zombie.getBlockIterator();
     int prevBlocks = zombie.numBlocks();
     while (iter.hasNext()) {
-      BlockInfoContiguous block = iter.next();
+      BlockInfo block = iter.next();
       // We assume that a block can be on only one storage in a DataNode.
       // That's why we pass in the DatanodeDescriptor rather than the
       // DatanodeStorageInfo.
@@ -1943,7 +1943,7 @@ public class BlockManager {
           break;
         }
 
-        BlockInfoContiguous bi = blocksMap.getStoredBlock(b);
+        BlockInfo bi = blocksMap.getStoredBlock(b);
         if (bi == null) {
           if (LOG.isDebugEnabled()) {
             LOG.debug("BLOCK* rescanPostponedMisreplicatedBlocks: " +
@@ -1983,7 +1983,7 @@ public class BlockManager {
     // Modify the (block-->datanode) map, according to the difference
     // between the old and new block report.
     //
-    Collection<BlockInfoContiguous> toAdd = new LinkedList<BlockInfoContiguous>();
+    Collection<BlockInfo> toAdd = new LinkedList<BlockInfo>();
     Collection<Block> toRemove = new TreeSet<Block>();
     Collection<Block> toInvalidate = new LinkedList<Block>();
     Collection<BlockToMarkCorrupt> toCorrupt = new LinkedList<BlockToMarkCorrupt>();
@@ -2000,7 +2000,7 @@ public class BlockManager {
       removeStoredBlock(b, node);
     }
     int numBlocksLogged = 0;
-    for (BlockInfoContiguous b : toAdd) {
+    for (BlockInfo b : toAdd) {
       addStoredBlock(b, storageInfo, null, numBlocksLogged < maxNumBlocksToLog);
       numBlocksLogged++;
     }
@@ -2022,7 +2022,7 @@ public class BlockManager {
    * Mark block replicas as corrupt except those on the storages in
    * newStorages list.
    */
-  public void markBlockReplicasAsCorrupt(BlockInfoContiguous block,
+  public void markBlockReplicasAsCorrupt(BlockInfo block,
       long oldGenerationStamp, long oldNumBytes,
       DatanodeStorageInfo[] newStorages) throws IOException {
     assert namesystem.hasWriteLock();
@@ -2091,7 +2091,7 @@ public class BlockManager {
         continue;
       }
 
-      BlockInfoContiguous storedBlock = blocksMap.getStoredBlock(iblk);
+      BlockInfo storedBlock = blocksMap.getStoredBlock(iblk);
       // If block does not belong to any file, we are done.
       if (storedBlock == null) continue;
 
@@ -2136,7 +2136,7 @@ public class BlockManager {
 
   private void reportDiff(DatanodeStorageInfo storageInfo,
       BlockListAsLongs newReport,
-      Collection<BlockInfoContiguous> toAdd, // add to DatanodeDescriptor
+      Collection<BlockInfo> toAdd, // add to DatanodeDescriptor
      Collection<Block> toRemove, // remove from DatanodeDescriptor
      Collection<Block> toInvalidate, // should be removed from DN
      Collection<BlockToMarkCorrupt> toCorrupt, // add to corrupt replicas list
@@ -2144,7 +2144,7 @@ public class BlockManager {
 
     // place a delimiter in the list which separates blocks
     // that have been reported from those that have not
-    BlockInfoContiguous delimiter = new BlockInfoContiguous(new Block(), (short) 1);
+    BlockInfo delimiter = new BlockInfo(new Block(), (short) 1);
     AddBlockResult result = storageInfo.addBlock(delimiter);
     assert result == AddBlockResult.ADDED
         : "Delimiting block cannot be present in the node";
@@ -2157,7 +2157,7 @@ public class BlockManager {
     // scan the report and process newly reported blocks
     for (BlockReportReplica iblk : newReport) {
       ReplicaState iState = iblk.getState();
-      BlockInfoContiguous storedBlock = processReportedBlock(storageInfo,
+      BlockInfo storedBlock = processReportedBlock(storageInfo,
           iblk, iState, toAdd, toInvalidate, toCorrupt, toUC);
 
       // move block to the head of the list
@@ -2169,7 +2169,7 @@ public class BlockManager {
 
     // collect blocks that have not been reported
     // all of them are next to the delimiter
-    Iterator<BlockInfoContiguous> it =
+    Iterator<BlockInfo> it =
         storageInfo.new BlockIterator(delimiter.getNext(0));
     while(it.hasNext())
       toRemove.add(it.next());
@@ -2207,10 +2207,10 @@ public class BlockManager {
    * @return the up-to-date stored block, if it should be kept.
    * Otherwise, null.
    */
-  private BlockInfoContiguous processReportedBlock(
+  private BlockInfo processReportedBlock(
       final DatanodeStorageInfo storageInfo,
       final Block block, final ReplicaState reportedState,
-      final Collection<BlockInfoContiguous> toAdd,
+      final Collection<BlockInfo> toAdd,
       final Collection<Block> toInvalidate,
       final Collection<BlockToMarkCorrupt> toCorrupt,
       final Collection<StatefulBlockInfo> toUC) {
@@ -2231,7 +2231,7 @@ public class BlockManager {
     }
 
     // find block by blockId
-    BlockInfoContiguous storedBlock = blocksMap.getStoredBlock(block);
+    BlockInfo storedBlock = blocksMap.getStoredBlock(block);
     if(storedBlock == null) {
       // If blocksMap does not contain reported block id,
       // the replica should be removed from the data-node.
@@ -2370,7 +2370,7 @@ public class BlockManager {
    */
   private BlockToMarkCorrupt checkReplicaCorrupt(
       Block reported, ReplicaState reportedState,
-      BlockInfoContiguous storedBlock, BlockUCState ucState,
+      BlockInfo storedBlock, BlockUCState ucState,
       DatanodeDescriptor dn) {
     switch(reportedState) {
     case FINALIZED:
@@ -2443,7 +2443,7 @@ public class BlockManager {
     }
   }
 
-  private boolean isBlockUnderConstruction(BlockInfoContiguous storedBlock,
+  private boolean isBlockUnderConstruction(BlockInfo storedBlock,
       BlockUCState ucState, ReplicaState reportedState) {
     switch(reportedState) {
     case FINALIZED:
@@ -2487,7 +2487,7 @@ public class BlockManager {
    *
    * @throws IOException
    */
-  private void addStoredBlockImmediate(BlockInfoContiguous storedBlock,
+  private void addStoredBlockImmediate(BlockInfo storedBlock,
       DatanodeStorageInfo storageInfo)
   throws IOException {
     assert (storedBlock != null && namesystem.hasWriteLock());
@@ -2519,13 +2519,13 @@ public class BlockManager {
    * needed replications if this takes care of the problem.
    * @return the block that is stored in blockMap.
    */
-  private Block addStoredBlock(final BlockInfoContiguous block,
+  private Block addStoredBlock(final BlockInfo block,
       DatanodeStorageInfo storageInfo,
       DatanodeDescriptor delNodeHint,
       boolean logEveryBlock)
   throws IOException {
     assert block != null && namesystem.hasWriteLock();
-    BlockInfoContiguous storedBlock;
+    BlockInfo storedBlock;
     DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
     if (block instanceof BlockInfoContiguousUnderConstruction) {
       //refresh our copy in case the block got completed in another thread
@@ -2623,7 +2623,7 @@ public class BlockManager {
     return storedBlock;
   }
 
-  private void logAddStoredBlock(BlockInfoContiguous storedBlock,
+  private void logAddStoredBlock(BlockInfo storedBlock,
       DatanodeDescriptor node) {
     if (!blockLog.isInfoEnabled()) {
       return;
@@ -2651,7 +2651,7 @@ public class BlockManager {
    *
    * @param blk Block whose corrupt replicas need to be invalidated
    */
-  private void invalidateCorruptReplicas(BlockInfoContiguous blk) {
+  private void invalidateCorruptReplicas(BlockInfo blk) {
     Collection<DatanodeDescriptor> nodes = corruptReplicas.getNodes(blk);
     boolean removedFromBlocksMap = true;
     if (nodes == null)
@@ -2730,7 +2730,7 @@ public class BlockManager {
     long nrInvalid = 0, nrOverReplicated = 0;
     long nrUnderReplicated = 0, nrPostponed = 0, nrUnderConstruction = 0;
     long startTimeMisReplicatedScan = Time.monotonicNow();
-    Iterator<BlockInfoContiguous> blocksItr = blocksMap.getBlocks().iterator();
+    Iterator<BlockInfo> blocksItr = blocksMap.getBlocks().iterator();
     long totalBlocks = blocksMap.size();
     replicationQueuesInitProgress = 0;
     long totalProcessed = 0;
@@ -2742,7 +2742,7 @@ public class BlockManager {
       namesystem.writeLockInterruptibly();
       try {
         while (processed < numBlocksPerIteration && blocksItr.hasNext()) {
-          BlockInfoContiguous block = blocksItr.next();
+          BlockInfo block = blocksItr.next();
           MisReplicationResult res = processMisReplicatedBlock(block);
           if (LOG.isTraceEnabled()) {
             LOG.trace("block " + block + ": " + res);
@@ -2817,7 +2817,7 @@ public class BlockManager {
    * appropriate queues if necessary, and returns a result code indicating
    * what happened with it.
    */
-  private MisReplicationResult processMisReplicatedBlock(BlockInfoContiguous block) {
+  private MisReplicationResult processMisReplicatedBlock(BlockInfo block) {
     if (block.isDeleted()) {
       // block does not belong to any file
      addToInvalidates(block);
@@ -3157,7 +3157,7 @@ public class BlockManager {
       ReplicaState reportedState, DatanodeDescriptor delHintNode)
       throws IOException {
     // blockReceived reports a finalized block
-    Collection<BlockInfoContiguous> toAdd = new LinkedList<BlockInfoContiguous>();
+    Collection<BlockInfo> toAdd = new LinkedList<BlockInfo>();
     Collection<Block> toInvalidate = new LinkedList<Block>();
     Collection<BlockToMarkCorrupt> toCorrupt = new LinkedList<BlockToMarkCorrupt>();
     Collection<StatefulBlockInfo> toUC = new LinkedList<StatefulBlockInfo>();
@@ -3174,7 +3174,7 @@ public class BlockManager {
       addStoredBlockUnderConstruction(b, storageInfo);
     }
     long numBlocksLogged = 0;
-    for (BlockInfoContiguous b : toAdd) {
+    for (BlockInfo b : toAdd) {
       addStoredBlock(b, storageInfo, delHintNode, numBlocksLogged < maxNumBlocksToLog);
       numBlocksLogged++;
     }
@@ -3301,7 +3301,7 @@ public class BlockManager {
    * @param b - the block being tested
    * @return count of live nodes for this block
    */
-  int countLiveNodes(BlockInfoContiguous b) {
+  int countLiveNodes(BlockInfo b) {
     if (!namesystem.isInStartupSafeMode()) {
       return countNodes(b).liveReplicas();
     }
@@ -3380,7 +3380,7 @@ public class BlockManager {
     return blocksMap.size();
   }
 
-  public DatanodeStorageInfo[] getStorages(BlockInfoContiguous block) {
+  public DatanodeStorageInfo[] getStorages(BlockInfo block) {
     final DatanodeStorageInfo[] storages = new DatanodeStorageInfo[block.numNodes()];
     int i = 0;
     for(DatanodeStorageInfo s : blocksMap.getStorages(block)) {
@@ -3409,7 +3409,7 @@ public class BlockManager {
     }
   }
 
-  public BlockInfoContiguous getStoredBlock(Block block) {
+  public BlockInfo getStoredBlock(Block block) {
     return blocksMap.getStoredBlock(block);
   }
 
@@ -3462,8 +3462,8 @@ public class BlockManager {
    * replicated.
    */
   public boolean checkBlocksProperlyReplicated(
-      String src, BlockInfoContiguous[] blocks) {
-    for (BlockInfoContiguous b: blocks) {
+      String src, BlockInfo[] blocks) {
+    for (BlockInfo b: blocks) {
       if (!b.isComplete()) {
         final BlockInfoContiguousUnderConstruction uc =
             (BlockInfoContiguousUnderConstruction)b;
@@ -3578,7 +3578,7 @@ public class BlockManager {
     return this.neededReplications.getCorruptReplOneBlockSize();
   }
 
-  public BlockInfoContiguous addBlockCollection(BlockInfoContiguous block,
+  public BlockInfo addBlockCollection(BlockInfo block,
       BlockCollection bc) {
     return blocksMap.addBlockCollection(block, bc);
   }
@@ -3818,7 +3818,7 @@ public class BlockManager {
 
   /**
    * A simple result enum for the result of
-   * {@link BlockManager#processMisReplicatedBlock(BlockInfoContiguous)}.
+   * {@link BlockManager#processMisReplicatedBlock(BlockInfo)}.
    */
   enum MisReplicationResult {
     /** The block should be invalidated since it belongs to a deleted file. */

@@ -36,10 +36,10 @@ import com.google.common.collect.Iterables;
  */
 class BlocksMap {
   private static class StorageIterator implements Iterator<DatanodeStorageInfo> {
-    private final BlockInfoContiguous blockInfo;
+    private final BlockInfo blockInfo;
     private int nextIdx = 0;
 
-    StorageIterator(BlockInfoContiguous blkInfo) {
+    StorageIterator(BlockInfo blkInfo) {
       this.blockInfo = blkInfo;
     }
 
@@ -63,14 +63,14 @@ class BlocksMap {
   /** Constant {@link LightWeightGSet} capacity. */
   private final int capacity;
 
-  private GSet<Block, BlockInfoContiguous> blocks;
+  private GSet<Block, BlockInfo> blocks;
 
   BlocksMap(int capacity) {
     // Use 2% of total memory to size the GSet capacity
     this.capacity = capacity;
-    this.blocks = new LightWeightGSet<Block, BlockInfoContiguous>(capacity) {
+    this.blocks = new LightWeightGSet<Block, BlockInfo>(capacity) {
       @Override
-      public Iterator<BlockInfoContiguous> iterator() {
+      public Iterator<BlockInfo> iterator() {
         SetIterator iterator = new SetIterator();
         /*
          * Not tracking any modifications to set. As this set will be used
@@ -97,15 +97,15 @@ class BlocksMap {
   }
 
   BlockCollection getBlockCollection(Block b) {
-    BlockInfoContiguous info = blocks.get(b);
+    BlockInfo info = blocks.get(b);
     return (info != null) ? info.getBlockCollection() : null;
   }
 
   /**
    * Add block b belonging to the specified block collection to the map.
    */
-  BlockInfoContiguous addBlockCollection(BlockInfoContiguous b, BlockCollection bc) {
-    BlockInfoContiguous info = blocks.get(b);
+  BlockInfo addBlockCollection(BlockInfo b, BlockCollection bc) {
+    BlockInfo info = blocks.get(b);
     if (info != b) {
       info = b;
       blocks.put(info);
@ -120,7 +120,7 @@ class BlocksMap {
|
||||||
* and remove all data-node locations associated with the block.
|
* and remove all data-node locations associated with the block.
|
||||||
*/
|
*/
|
||||||
void removeBlock(Block block) {
|
void removeBlock(Block block) {
|
||||||
BlockInfoContiguous blockInfo = blocks.remove(block);
|
BlockInfo blockInfo = blocks.remove(block);
|
||||||
if (blockInfo == null)
|
if (blockInfo == null)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
@ -132,7 +132,7 @@ class BlocksMap {
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Returns the block object it it exists in the map. */
|
/** Returns the block object it it exists in the map. */
|
||||||
BlockInfoContiguous getStoredBlock(Block b) {
|
BlockInfo getStoredBlock(Block b) {
|
||||||
return blocks.get(b);
|
return blocks.get(b);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -164,7 +164,7 @@ class BlocksMap {
|
||||||
* For a block that has already been retrieved from the BlocksMap
|
* For a block that has already been retrieved from the BlocksMap
|
||||||
* returns {@link Iterable} of the storages the block belongs to.
|
* returns {@link Iterable} of the storages the block belongs to.
|
||||||
*/
|
*/
|
||||||
Iterable<DatanodeStorageInfo> getStorages(final BlockInfoContiguous storedBlock) {
|
Iterable<DatanodeStorageInfo> getStorages(final BlockInfo storedBlock) {
|
||||||
return new Iterable<DatanodeStorageInfo>() {
|
return new Iterable<DatanodeStorageInfo>() {
|
||||||
@Override
|
@Override
|
||||||
public Iterator<DatanodeStorageInfo> iterator() {
|
public Iterator<DatanodeStorageInfo> iterator() {
|
||||||
|
@ -175,7 +175,7 @@ class BlocksMap {
|
||||||
|
|
||||||
/** counts number of containing nodes. Better than using iterator. */
|
/** counts number of containing nodes. Better than using iterator. */
|
||||||
int numNodes(Block b) {
|
int numNodes(Block b) {
|
||||||
BlockInfoContiguous info = blocks.get(b);
|
BlockInfo info = blocks.get(b);
|
||||||
return info == null ? 0 : info.numNodes();
|
return info == null ? 0 : info.numNodes();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -185,7 +185,7 @@ class BlocksMap {
|
||||||
* only if it does not belong to any file and data-nodes.
|
* only if it does not belong to any file and data-nodes.
|
||||||
*/
|
*/
|
||||||
boolean removeNode(Block b, DatanodeDescriptor node) {
|
boolean removeNode(Block b, DatanodeDescriptor node) {
|
||||||
BlockInfoContiguous info = blocks.get(b);
|
BlockInfo info = blocks.get(b);
|
||||||
if (info == null)
|
if (info == null)
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
|
@ -203,7 +203,7 @@ class BlocksMap {
|
||||||
return blocks.size();
|
return blocks.size();
|
||||||
}
|
}
|
||||||
|
|
||||||
Iterable<BlockInfoContiguous> getBlocks() {
|
Iterable<BlockInfo> getBlocks() {
|
||||||
return blocks;
|
return blocks;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -218,8 +218,8 @@ class BlocksMap {
|
||||||
* @param newBlock - block for replacement
|
* @param newBlock - block for replacement
|
||||||
* @return new block
|
* @return new block
|
||||||
*/
|
*/
|
||||||
BlockInfoContiguous replaceBlock(BlockInfoContiguous newBlock) {
|
BlockInfo replaceBlock(BlockInfo newBlock) {
|
||||||
BlockInfoContiguous currentBlock = blocks.get(newBlock);
|
BlockInfo currentBlock = blocks.get(newBlock);
|
||||||
assert currentBlock != null : "the block if not in blocksMap";
|
assert currentBlock != null : "the block if not in blocksMap";
|
||||||
// replace block in data-node lists
|
// replace block in data-node lists
|
||||||
for (int i = currentBlock.numNodes() - 1; i >= 0; i--) {
|
for (int i = currentBlock.numNodes() - 1; i >= 0; i--) {
|
||||||
|
|
|
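For readers skimming the rename, the BlocksMap hunks above are easier to follow with a rough model of what the class does: it is a set keyed by block identity, and addBlockCollection makes the passed-in block the canonical stored instance and returns it, so callers always end up holding the object that lives in the map. The stand-alone sketch below only illustrates that contract; ToyBlocksMap, ToyBlockInfo and the owner field are made-up names for this example, and the real class uses LightWeightGSet rather than a HashMap.

import java.util.HashMap;
import java.util.Map;

// Toy model of the BlocksMap contract shown in the hunks above: lookups are
// by block ID, and "add" makes the argument the canonical stored instance.
class ToyBlocksMap {
  // Stand-in for the renamed BlockInfo; only the identity (block ID) matters here.
  static class ToyBlockInfo {
    final long blockId;
    String owner; // stand-in for the BlockCollection back-reference
    ToyBlockInfo(long blockId) { this.blockId = blockId; }
  }

  private final Map<Long, ToyBlockInfo> blocks = new HashMap<>();

  // Mirrors addBlockCollection: if the stored instance is not b, b replaces it;
  // the back-reference is set and the canonical instance is returned.
  ToyBlockInfo add(ToyBlockInfo b, String owner) {
    ToyBlockInfo info = blocks.get(b.blockId);
    if (info != b) {
      info = b;
      blocks.put(info.blockId, info);
    }
    info.owner = owner;
    return info;
  }

  ToyBlockInfo getStoredBlock(long blockId) {
    return blocks.get(blockId);
  }

  public static void main(String[] args) {
    ToyBlocksMap map = new ToyBlocksMap();
    ToyBlockInfo b = new ToyBlockInfo(42L);
    ToyBlockInfo stored = map.add(b, "fileA");
    System.out.println(stored == b);                   // true: b is now the stored instance
    System.out.println(map.getStoredBlock(42L).owner); // fileA
  }
}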
@@ -369,7 +369,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
 * @param file The file.
 */
 private void rescanFile(CacheDirective directive, INodeFile file) {
-BlockInfoContiguous[] blockInfos = file.getBlocks();
+BlockInfo[] blockInfos = file.getBlocks();

 // Increment the "needed" statistics
 directive.addFilesNeeded(1);
@@ -394,7 +394,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
 }

 long cachedTotal = 0;
-for (BlockInfoContiguous blockInfo : blockInfos) {
+for (BlockInfo blockInfo : blockInfos) {
 if (!blockInfo.getBlockUCState().equals(BlockUCState.COMPLETE)) {
 // We don't try to cache blocks that are under construction.
 LOG.trace("Directive {}: can't cache block {} because it is in state "
@@ -453,7 +453,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
 }

 private String findReasonForNotCaching(CachedBlock cblock,
-BlockInfoContiguous blockInfo) {
+BlockInfo blockInfo) {
 if (blockInfo == null) {
 // Somehow, a cache report with the block arrived, but the block
 // reports from the DataNode haven't (yet?) described such a block.
@@ -513,7 +513,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
 iter.remove();
 }
 }
-BlockInfoContiguous blockInfo = blockManager.
+BlockInfo blockInfo = blockManager.
 getStoredBlock(new Block(cblock.getBlockId()));
 String reason = findReasonForNotCaching(cblock, blockInfo);
 int neededCached = 0;
@@ -628,7 +628,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
 List<DatanodeDescriptor> pendingCached) {
 // To figure out which replicas can be cached, we consult the
 // blocksMap. We don't want to try to cache a corrupt replica, though.
-BlockInfoContiguous blockInfo = blockManager.
+BlockInfo blockInfo = blockManager.
 getStoredBlock(new Block(cachedBlock.getBlockId()));
 if (blockInfo == null) {
 LOG.debug("Block {}: can't add new cached replicas," +
@@ -667,7 +667,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
 Iterator<CachedBlock> it = datanode.getPendingCached().iterator();
 while (it.hasNext()) {
 CachedBlock cBlock = it.next();
-BlockInfoContiguous info =
+BlockInfo info =
 blockManager.getStoredBlock(new Block(cBlock.getBlockId()));
 if (info != null) {
 pendingBytes -= info.getNumBytes();
@@ -677,7 +677,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
 // Add pending uncached blocks from effective capacity
 while (it.hasNext()) {
 CachedBlock cBlock = it.next();
-BlockInfoContiguous info =
+BlockInfo info =
 blockManager.getStoredBlock(new Block(cBlock.getBlockId()));
 if (info != null) {
 pendingBytes += info.getNumBytes();
@@ -335,7 +335,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
 * Remove block from the list of blocks belonging to the data-node. Remove
 * data-node from the block.
 */
-boolean removeBlock(BlockInfoContiguous b) {
+boolean removeBlock(BlockInfo b) {
 final DatanodeStorageInfo s = b.findStorageInfo(this);
 // if block exists on this datanode
 if (s != null) {
@@ -348,7 +348,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
 * Remove block from the list of blocks belonging to the data-node. Remove
 * data-node from the block.
 */
-boolean removeBlock(String storageID, BlockInfoContiguous b) {
+boolean removeBlock(String storageID, BlockInfo b) {
 DatanodeStorageInfo s = getStorageInfo(storageID);
 if (s != null) {
 return s.removeBlock(b);
@@ -537,12 +537,12 @@ public class DatanodeDescriptor extends DatanodeInfo {
 }
 }

-private static class BlockIterator implements Iterator<BlockInfoContiguous> {
+private static class BlockIterator implements Iterator<BlockInfo> {
 private int index = 0;
-private final List<Iterator<BlockInfoContiguous>> iterators;
+private final List<Iterator<BlockInfo>> iterators;

 private BlockIterator(final DatanodeStorageInfo... storages) {
-List<Iterator<BlockInfoContiguous>> iterators = new ArrayList<Iterator<BlockInfoContiguous>>();
+List<Iterator<BlockInfo>> iterators = new ArrayList<Iterator<BlockInfo>>();
 for (DatanodeStorageInfo e : storages) {
 iterators.add(e.getBlockIterator());
 }
@@ -556,7 +556,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
 }

 @Override
-public BlockInfoContiguous next() {
+public BlockInfo next() {
 update();
 return iterators.get(index).next();
 }
@@ -573,10 +573,10 @@ public class DatanodeDescriptor extends DatanodeInfo {
 }
 }

-Iterator<BlockInfoContiguous> getBlockIterator() {
+Iterator<BlockInfo> getBlockIterator() {
 return new BlockIterator(getStorageInfos());
 }
-Iterator<BlockInfoContiguous> getBlockIterator(final String storageID) {
+Iterator<BlockInfo> getBlockIterator(final String storageID) {
 return new BlockIterator(getStorageInfo(storageID));
 }
@@ -25,9 +25,6 @@ import com.google.common.annotations.VisibleForTesting;

 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
@@ -83,10 +80,10 @@ public class DatanodeStorageInfo {
 /**
 * Iterates over the list of blocks belonging to the data-node.
 */
-class BlockIterator implements Iterator<BlockInfoContiguous> {
+class BlockIterator implements Iterator<BlockInfo> {
-private BlockInfoContiguous current;
+private BlockInfo current;

-BlockIterator(BlockInfoContiguous head) {
+BlockIterator(BlockInfo head) {
 this.current = head;
 }

@@ -94,8 +91,8 @@ public class DatanodeStorageInfo {
 return current != null;
 }

-public BlockInfoContiguous next() {
+public BlockInfo next() {
-BlockInfoContiguous res = current;
+BlockInfo res = current;
 current = current.getNext(current.findStorageInfo(DatanodeStorageInfo.this));
 return res;
 }
@@ -115,7 +112,7 @@ public class DatanodeStorageInfo {
 private volatile long remaining;
 private long blockPoolUsed;

-private volatile BlockInfoContiguous blockList = null;
+private volatile BlockInfo blockList = null;
 private int numBlocks = 0;

 // The ID of the last full block report which updated this storage.
@@ -229,7 +226,7 @@ public class DatanodeStorageInfo {
 return blockPoolUsed;
 }

-public AddBlockResult addBlock(BlockInfoContiguous b) {
+public AddBlockResult addBlock(BlockInfo b) {
 // First check whether the block belongs to a different storage
 // on the same DN.
 AddBlockResult result = AddBlockResult.ADDED;
@@ -254,7 +251,7 @@ public class DatanodeStorageInfo {
 return result;
 }

-public boolean removeBlock(BlockInfoContiguous b) {
+public boolean removeBlock(BlockInfo b) {
 blockList = b.listRemove(blockList, this);
 if (b.removeStorage(this)) {
 numBlocks--;
@@ -268,7 +265,7 @@ public class DatanodeStorageInfo {
 return numBlocks;
 }

-Iterator<BlockInfoContiguous> getBlockIterator() {
+Iterator<BlockInfo> getBlockIterator() {
 return new BlockIterator(blockList);

 }
@@ -277,7 +274,7 @@ public class DatanodeStorageInfo {
 * Move block to the head of the list of blocks belonging to the data-node.
 * @return the index of the head of the blockList
 */
-int moveBlockToHead(BlockInfoContiguous b, int curIndex, int headIndex) {
+int moveBlockToHead(BlockInfo b, int curIndex, int headIndex) {
 blockList = b.moveBlockToHead(blockList, this, curIndex, headIndex);
 return curIndex;
 }
@@ -287,7 +284,7 @@ public class DatanodeStorageInfo {
 * @return the head of the blockList
 */
 @VisibleForTesting
-BlockInfoContiguous getBlockListHeadForTesting(){
+BlockInfo getBlockListHeadForTesting(){
 return blockList;
 }
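The BlockIterator and blockList hunks above touch the per-storage block list, which is not a java.util collection: the list is threaded through the block objects themselves, each block carrying a next pointer for every storage that holds a replica (the triplets scheme described in the BlockInfo javadoc). A minimal stand-alone sketch of that idea, using hypothetical names and a plain Map of next pointers instead of the triplets array, is shown below; it is an illustration of the pattern, not the Hadoop code.

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.NoSuchElementException;

// Sketch of an intrusive, per-storage linked list of blocks: each block keeps
// its own "next" pointer for every storage it lives on, so one object can sit
// on several storages' lists at once without extra node allocations.
class IntrusiveBlockListDemo {
  static class Block {
    final long id;
    final Map<String, Block> nextPerStorage = new HashMap<>();
    Block(long id) { this.id = id; }
  }

  static class Storage {
    final String storageId;
    Block head; // head of this storage's list, like the blockList field above
    Storage(String storageId) { this.storageId = storageId; }

    // Push a block onto the front of this storage's list.
    void addBlock(Block b) {
      b.nextPerStorage.put(storageId, head);
      head = b;
    }

    // Same traversal shape as BlockIterator.next() above: follow the pointer
    // this block keeps for *this* storage.
    Iterator<Block> blockIterator() {
      return new Iterator<Block>() {
        private Block current = head;
        public boolean hasNext() { return current != null; }
        public Block next() {
          if (current == null) throw new NoSuchElementException();
          Block res = current;
          current = current.nextPerStorage.get(storageId);
          return res;
        }
      };
    }
  }

  public static void main(String[] args) {
    Storage s = new Storage("DS-1");
    s.addBlock(new Block(1));
    s.addBlock(new Block(2));
    for (Iterator<Block> it = s.blockIterator(); it.hasNext();) {
      System.out.println(it.next().id); // prints 2 then 1
    }
  }
}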
@@ -100,7 +100,7 @@ public class DecommissionManager {
 * reports or other events. Before being finally marking as decommissioned,
 * another check is done with the actual block map.
 */
-private final TreeMap<DatanodeDescriptor, AbstractList<BlockInfoContiguous>>
+private final TreeMap<DatanodeDescriptor, AbstractList<BlockInfo>>
 decomNodeBlocks;

 /**
@@ -248,7 +248,7 @@ public class DecommissionManager {
 * Full-strength replication is not always necessary, hence "sufficient".
 * @return true if sufficient, else false.
 */
-private boolean isSufficientlyReplicated(BlockInfoContiguous block,
+private boolean isSufficientlyReplicated(BlockInfo block,
 BlockCollection bc,
 NumberReplicas numberReplicas) {
 final int numExpected = bc.getPreferredBlockReplication();
@@ -412,7 +412,7 @@ public class DecommissionManager {
 }

 private void check() {
-final Iterator<Map.Entry<DatanodeDescriptor, AbstractList<BlockInfoContiguous>>>
+final Iterator<Map.Entry<DatanodeDescriptor, AbstractList<BlockInfo>>>
 it = new CyclicIteration<>(decomNodeBlocks, iterkey).iterator();
 final LinkedList<DatanodeDescriptor> toRemove = new LinkedList<>();

@@ -420,10 +420,10 @@ public class DecommissionManager {
 && !exceededNumBlocksPerCheck()
 && !exceededNumNodesPerCheck()) {
 numNodesChecked++;
-final Map.Entry<DatanodeDescriptor, AbstractList<BlockInfoContiguous>>
+final Map.Entry<DatanodeDescriptor, AbstractList<BlockInfo>>
 entry = it.next();
 final DatanodeDescriptor dn = entry.getKey();
-AbstractList<BlockInfoContiguous> blocks = entry.getValue();
+AbstractList<BlockInfo> blocks = entry.getValue();
 boolean fullScan = false;
 if (blocks == null) {
 // This is a newly added datanode, run through its list to schedule
@@ -495,7 +495,7 @@ public class DecommissionManager {
 * datanode.
 */
 private void pruneSufficientlyReplicated(final DatanodeDescriptor datanode,
-AbstractList<BlockInfoContiguous> blocks) {
+AbstractList<BlockInfo> blocks) {
 processBlocksForDecomInternal(datanode, blocks.iterator(), null, true);
 }

@@ -509,9 +509,9 @@ public class DecommissionManager {
 * @param datanode
 * @return List of insufficiently replicated blocks
 */
-private AbstractList<BlockInfoContiguous> handleInsufficientlyReplicated(
+private AbstractList<BlockInfo> handleInsufficientlyReplicated(
 final DatanodeDescriptor datanode) {
-AbstractList<BlockInfoContiguous> insufficient = new ChunkedArrayList<>();
+AbstractList<BlockInfo> insufficient = new ChunkedArrayList<>();
 processBlocksForDecomInternal(datanode, datanode.getBlockIterator(),
 insufficient, false);
 return insufficient;
@@ -535,8 +535,8 @@ public class DecommissionManager {
 */
 private void processBlocksForDecomInternal(
 final DatanodeDescriptor datanode,
-final Iterator<BlockInfoContiguous> it,
+final Iterator<BlockInfo> it,
-final List<BlockInfoContiguous> insufficientlyReplicated,
+final List<BlockInfo> insufficientlyReplicated,
 boolean pruneSufficientlyReplicated) {
 boolean firstReplicationLog = true;
 int underReplicatedBlocks = 0;
@@ -544,7 +544,7 @@ public class DecommissionManager {
 int underReplicatedInOpenFiles = 0;
 while (it.hasNext()) {
 numBlocksChecked++;
-final BlockInfoContiguous block = it.next();
+final BlockInfo block = it.next();
 // Remove the block from the list if it's no longer in the block map,
 // e.g. the containing file has been deleted
 if (blockManager.blocksMap.getStoredBlock(block) == null) {
@@ -42,7 +42,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
@@ -235,7 +235,7 @@ class FSDirWriteFileOp {
 return onRetryBlock[0];
 } else {
 // add new chosen targets to already allocated block and return
-BlockInfoContiguous lastBlockInFile = pendingFile.getLastBlock();
+BlockInfo lastBlockInFile = pendingFile.getLastBlock();
 ((BlockInfoContiguousUnderConstruction) lastBlockInFile)
 .setExpectedLocations(targets);
 offset = pendingFile.computeFileSize();
@@ -507,7 +507,7 @@ class FSDirWriteFileOp {
 /**
 * Add a block to the file. Returns a reference to the added block.
 */
-private static BlockInfoContiguous addBlock(
+private static BlockInfo addBlock(
 FSDirectory fsd, String path, INodesInPath inodesInPath, Block block,
 DatanodeStorageInfo[] targets) throws IOException {
 fsd.writeLock();
@@ -608,7 +608,7 @@ class FSDirWriteFileOp {
 }
 }
 final INodeFile file = fsn.checkLease(src, clientName, inode, fileId);
-BlockInfoContiguous lastBlockInFile = file.getLastBlock();
+BlockInfo lastBlockInFile = file.getLastBlock();
 if (!Block.matchingIdAndGenStamp(previousBlock, lastBlockInFile)) {
 // The block that the client claims is the current last block
 // doesn't match up with what we think is the last block. There are
@@ -636,7 +636,7 @@ class FSDirWriteFileOp {
 // changed the namesystem state yet.
 // We run this analysis again in Part II where case 4 is impossible.

-BlockInfoContiguous penultimateBlock = file.getPenultimateBlock();
+BlockInfo penultimateBlock = file.getPenultimateBlock();
 if (previous == null &&
 lastBlockInFile != null &&
 lastBlockInFile.getNumBytes() >= file.getPreferredBlockSize() &&
@@ -762,7 +762,7 @@ class FSDirWriteFileOp {
 long id, PermissionStatus permissions, long mtime, long atime,
 short replication, long preferredBlockSize, byte storagePolicyId) {
 return new INodeFile(id, null, permissions, mtime, atime,
-BlockInfoContiguous.EMPTY_ARRAY, replication, preferredBlockSize,
+BlockInfo.EMPTY_ARRAY, replication, preferredBlockSize,
 storagePolicyId);
 }

@@ -802,7 +802,7 @@ class FSDirWriteFileOp {
 DatanodeStorageInfo[] targets)
 throws IOException {
 assert fsn.hasWriteLock();
-BlockInfoContiguous b = addBlock(fsn.dir, src, inodesInPath, newBlock,
+BlockInfo b = addBlock(fsn.dir, src, inodesInPath, newBlock,
 targets);
 NameNode.stateChangeLog.info("BLOCK* allocate " + b + " for " + src);
 DatanodeStorageInfo.incrementBlocksScheduled(targets);
@@ -33,10 +33,8 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
-import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.XAttrHelper;
@@ -51,7 +49,7 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -919,7 +917,7 @@ public class FSDirectory implements Closeable {
 unprotectedTruncate(iip, newLength, collectedBlocks, mtime, null);

 if(! onBlockBoundary) {
-BlockInfoContiguous oldBlock = file.getLastBlock();
+BlockInfo oldBlock = file.getLastBlock();
 Block tBlk =
 getFSNamesystem().prepareFileForTruncate(iip,
 clientName, clientMachine, file.computeFileSize() - newLength,
@@ -44,7 +44,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable;
@@ -773,10 +773,10 @@ public class FSEditLog implements LogsPurgeable {

 public void logAddBlock(String path, INodeFile file) {
 Preconditions.checkArgument(file.isUnderConstruction());
-BlockInfoContiguous[] blocks = file.getBlocks();
+BlockInfo[] blocks = file.getBlocks();
 Preconditions.checkState(blocks != null && blocks.length > 0);
-BlockInfoContiguous pBlock = blocks.length > 1 ? blocks[blocks.length - 2] : null;
+BlockInfo pBlock = blocks.length > 1 ? blocks[blocks.length - 2] : null;
-BlockInfoContiguous lastBlock = blocks[blocks.length - 1];
+BlockInfo lastBlock = blocks[blocks.length - 1];
 AddBlockOp op = AddBlockOp.getInstance(cache.get()).setPath(path)
 .setPenultimateBlock(pBlock).setLastBlock(lastBlock);
 logEdit(op);
@@ -43,7 +43,7 @@ import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
@@ -942,7 +942,7 @@ public class FSEditLogLoader {
 */
 private void addNewBlock(FSDirectory fsDir, AddBlockOp op, INodeFile file)
 throws IOException {
-BlockInfoContiguous[] oldBlocks = file.getBlocks();
+BlockInfo[] oldBlocks = file.getBlocks();
 Block pBlock = op.getPenultimateBlock();
 Block newBlock= op.getLastBlock();

@@ -968,7 +968,7 @@ public class FSEditLogLoader {
 Preconditions.checkState(oldBlocks == null || oldBlocks.length == 0);
 }
 // add the new block
-BlockInfoContiguous newBI = new BlockInfoContiguousUnderConstruction(
+BlockInfo newBI = new BlockInfoContiguousUnderConstruction(
 newBlock, file.getPreferredBlockReplication());
 fsNamesys.getBlockManager().addBlockCollection(newBI, file);
 file.addBlock(newBI);
@@ -982,7 +982,7 @@ public class FSEditLogLoader {
 private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op,
 INodesInPath iip, INodeFile file) throws IOException {
 // Update its block list
-BlockInfoContiguous[] oldBlocks = file.getBlocks();
+BlockInfo[] oldBlocks = file.getBlocks();
 Block[] newBlocks = op.getBlocks();
 String path = op.getPath();

@@ -991,7 +991,7 @@ public class FSEditLogLoader {

 // First, update blocks in common
 for (int i = 0; i < oldBlocks.length && i < newBlocks.length; i++) {
-BlockInfoContiguous oldBlock = oldBlocks[i];
+BlockInfo oldBlock = oldBlocks[i];
 Block newBlock = newBlocks[i];

 boolean isLastBlock = i == newBlocks.length - 1;
@@ -1043,7 +1043,7 @@ public class FSEditLogLoader {
 // We're adding blocks
 for (int i = oldBlocks.length; i < newBlocks.length; i++) {
 Block newBlock = newBlocks[i];
-BlockInfoContiguous newBI;
+BlockInfo newBI;
 if (!op.shouldCompleteLastBlock()) {
 // TODO: shouldn't this only be true for the last block?
 // what about an old-version fsync() where fsync isn't called
@@ -1055,7 +1055,7 @@ public class FSEditLogLoader {
 // is only executed when loading edits written by prior
 // versions of Hadoop. Current versions always log
 // OP_ADD operations as each block is allocated.
-newBI = new BlockInfoContiguous(newBlock,
+newBI = new BlockInfo(newBlock,
 file.getPreferredBlockReplication());
 }
 fsNamesys.getBlockManager().addBlockCollection(newBI, file);
@@ -52,7 +52,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutFlags;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -686,7 +686,7 @@ public class FSImageFormat {

 public void updateBlocksMap(INodeFile file) {
 // Add file->block mapping
-final BlockInfoContiguous[] blocks = file.getBlocks();
+final BlockInfo[] blocks = file.getBlocks();
 if (blocks != null) {
 final BlockManager bm = namesystem.getBlockManager();
 for (int i = 0; i < blocks.length; i++) {
@@ -753,9 +753,9 @@ public class FSImageFormat {
 // file

 // read blocks
-BlockInfoContiguous[] blocks = new BlockInfoContiguous[numBlocks];
+BlockInfo[] blocks = new BlockInfo[numBlocks];
 for (int j = 0; j < numBlocks; j++) {
-blocks[j] = new BlockInfoContiguous(replication);
+blocks[j] = new BlockInfo(replication);
 blocks[j].readFields(in);
 }

@@ -775,7 +775,7 @@ public class FSImageFormat {
 clientMachine = FSImageSerialization.readString(in);
 // convert the last block to BlockUC
 if (blocks.length > 0) {
-BlockInfoContiguous lastBlk = blocks[blocks.length - 1];
+BlockInfo lastBlk = blocks[blocks.length - 1];
 blocks[blocks.length - 1] = new BlockInfoContiguousUnderConstruction(
 lastBlk, replication);
 }
@@ -958,9 +958,9 @@ public class FSImageFormat {
 FileUnderConstructionFeature uc = cons.getFileUnderConstructionFeature();
 oldnode.toUnderConstruction(uc.getClientName(), uc.getClientMachine());
 if (oldnode.numBlocks() > 0) {
-BlockInfoContiguous ucBlock = cons.getLastBlock();
+BlockInfo ucBlock = cons.getLastBlock();
 // we do not replace the inode, just replace the last block of oldnode
-BlockInfoContiguous info = namesystem.getBlockManager().addBlockCollection(
+BlockInfo info = namesystem.getBlockManager().addBlockCollection(
 ucBlock, oldnode);
 oldnode.setBlock(oldnode.numBlocks() - 1, info);
 }
@@ -25,7 +25,6 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Map;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -43,7 +42,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.LoaderContext;
@@ -211,7 +210,7 @@ public final class FSImageFormatPBINode {

 public static void updateBlocksMap(INodeFile file, BlockManager bm) {
 // Add file->block mapping
-final BlockInfoContiguous[] blocks = file.getBlocks();
+final BlockInfo[] blocks = file.getBlocks();
 if (blocks != null) {
 for (int i = 0; i < blocks.length; i++) {
 file.setBlock(i, bm.addBlockCollection(blocks[i], file));
@@ -324,9 +323,9 @@ public final class FSImageFormatPBINode {
 short replication = (short) f.getReplication();
 LoaderContext state = parent.getLoaderContext();

-BlockInfoContiguous[] blocks = new BlockInfoContiguous[bp.size()];
+BlockInfo[] blocks = new BlockInfo[bp.size()];
 for (int i = 0, e = bp.size(); i < e; ++i) {
-blocks[i] = new BlockInfoContiguous(PBHelper.convert(bp.get(i)), replication);
+blocks[i] = new BlockInfo(PBHelper.convert(bp.get(i)), replication);
 }
 final PermissionStatus permissions = loadPermission(f.getPermission(),
 parent.getLoaderContext().getStringTable());
@@ -352,7 +351,7 @@ public final class FSImageFormatPBINode {
 INodeSection.FileUnderConstructionFeature uc = f.getFileUC();
 file.toUnderConstruction(uc.getClientName(), uc.getClientMachine());
 if (blocks.length > 0) {
-BlockInfoContiguous lastBlk = file.getLastBlock();
+BlockInfo lastBlk = file.getLastBlock();
 // replace the last block of file
 file.setBlock(file.numBlocks() - 1, new BlockInfoContiguousUnderConstruction(
 lastBlk, replication));
@@ -33,7 +33,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat;
@@ -126,12 +126,12 @@ public class FSImageSerialization {
 long preferredBlockSize = in.readLong();

 int numBlocks = in.readInt();
-BlockInfoContiguous[] blocks = new BlockInfoContiguous[numBlocks];
+BlockInfo[] blocks = new BlockInfo[numBlocks];
 Block blk = new Block();
 int i = 0;
 for (; i < numBlocks-1; i++) {
 blk.readFields(in);
-blocks[i] = new BlockInfoContiguous(blk, blockReplication);
+blocks[i] = new BlockInfo(blk, blockReplication);
 }
 // last block is UNDER_CONSTRUCTION
 if(numBlocks > 0) {
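The FSImageFormat, FSImageFormatPBINode and FSImageSerialization hunks above all follow the same loading pattern: build the block array with the renamed BlockInfo type, then, for a file that is still open for write, swap the last element for an under-construction variant. The stripped-down sketch below only illustrates that control flow; SimpleBlock, SimpleBlockUnderConstruction and loadBlocks are placeholder names invented for the example, not the Hadoop types.

// Placeholder types standing in for BlockInfo and its under-construction
// subclass; only the "replace the last block" pattern is being illustrated.
class SimpleBlock {
  final long id;
  SimpleBlock(long id) { this.id = id; }
  public String toString() { return "COMPLETE(" + id + ")"; }
}

class SimpleBlockUnderConstruction extends SimpleBlock {
  SimpleBlockUnderConstruction(SimpleBlock b) { super(b.id); }
  public String toString() { return "UNDER_CONSTRUCTION(" + id + ")"; }
}

public class ImageLoaderSketch {
  // Mirrors the loader shape: materialise numBlocks blocks, then convert the
  // last one when the file is still under construction.
  static SimpleBlock[] loadBlocks(int numBlocks, boolean underConstruction) {
    SimpleBlock[] blocks = new SimpleBlock[numBlocks];
    for (int i = 0; i < numBlocks; i++) {
      blocks[i] = new SimpleBlock(i); // the real loader calls readFields(in) here
    }
    if (underConstruction && blocks.length > 0) {
      SimpleBlock lastBlk = blocks[blocks.length - 1];
      blocks[blocks.length - 1] = new SimpleBlockUnderConstruction(lastBlk);
    }
    return blocks;
  }

  public static void main(String[] args) {
    for (SimpleBlock b : loadBlocks(3, true)) {
      System.out.println(b); // COMPLETE(0), COMPLETE(1), UNDER_CONSTRUCTION(2)
    }
  }
}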
@@ -203,7 +203,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretMan
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager.SecretManagerState;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -2016,7 +2016,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 }

 // Check if the file is already being truncated with the same length
-final BlockInfoContiguous last = file.getLastBlock();
+final BlockInfo last = file.getLastBlock();
 if (last != null && last.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
 final Block truncateBlock
 = ((BlockInfoContiguousUnderConstruction)last).getTruncateBlock();
@@ -2090,7 +2090,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 leaseManager.addLease(
 file.getFileUnderConstructionFeature().getClientName(), file.getId());
 boolean shouldRecoverNow = (newBlock == null);
-BlockInfoContiguous oldBlock = file.getLastBlock();
+BlockInfo oldBlock = file.getLastBlock();
 boolean shouldCopyOnTruncate = shouldCopyOnTruncate(file, oldBlock);
 if(newBlock == null) {
 newBlock = (shouldCopyOnTruncate) ? createNewBlock() :
@@ -2141,7 +2141,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 * Defines if a replica needs to be copied on truncate or
 * can be truncated in place.
 */
-boolean shouldCopyOnTruncate(INodeFile file, BlockInfoContiguous blk) {
+boolean shouldCopyOnTruncate(INodeFile file, BlockInfo blk) {
 if(!isUpgradeFinalized()) {
 return true;
 }
@@ -2505,7 +2505,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 recoverLeaseInternal(RecoverLeaseOp.APPEND_FILE, iip, src, holder,
 clientMachine, false);

-final BlockInfoContiguous lastBlock = myFile.getLastBlock();
+final BlockInfo lastBlock = myFile.getLastBlock();
 // Check that the block has at least minimum replication.
 if(lastBlock != null && lastBlock.isComplete() &&
 !getBlockManager().isSufficientlyReplicated(lastBlock)) {
@@ -2561,7 +2561,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 }
 }
 } else {
-BlockInfoContiguous lastBlock = file.getLastBlock();
+BlockInfo lastBlock = file.getLastBlock();
 if (lastBlock != null) {
 ExtendedBlock blk = new ExtendedBlock(this.getBlockPoolId(), lastBlock);
 ret = new LocatedBlock(blk, new DatanodeInfo[0]);
@@ -2603,7 +2603,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 /** Compute quota change for converting a complete block to a UC block */
 private QuotaCounts computeQuotaDeltaForUCBlock(INodeFile file) {
 final QuotaCounts delta = new QuotaCounts.Builder().build();
-final BlockInfoContiguous lastBlock = file.getLastBlock();
+final BlockInfo lastBlock = file.getLastBlock();
 if (lastBlock != null) {
 final long diff = file.getPreferredBlockSize() - lastBlock.getNumBytes();
 final short repl = file.getPreferredBlockReplication();
@@ -2740,7 +2740,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 op.getExceptionMessage(src, holder, clientMachine,
 "lease recovery is in progress. Try again later."));
 } else {
-final BlockInfoContiguous lastBlock = file.getLastBlock();
+final BlockInfo lastBlock = file.getLastBlock();
 if (lastBlock != null
 && lastBlock.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
 throw new RecoveryInProgressException(
@@ -3065,10 +3065,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 .getBlocks());
 } else {
 // check the penultimate block of this file
-BlockInfoContiguous b = v.getPenultimateBlock();
+BlockInfo b = v.getPenultimateBlock();
 return b == null ||
 blockManager.checkBlocksProperlyReplicated(
-src, new BlockInfoContiguous[] { b });
+src, new BlockInfo[] { b });
 }
 }

@@ -3242,7 +3242,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,

 for (Block b : blocks.getToDeleteList()) {
 if (trackBlockCounts) {
-BlockInfoContiguous bi = getStoredBlock(b);
+BlockInfo bi = getStoredBlock(b);
 if (bi.isComplete()) {
 numRemovedComplete++;
 if (bi.numNodes() >= blockManager.minReplication) {
@@ -3466,10 +3466,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,

 final INodeFile pendingFile = iip.getLastINode().asFile();
 int nrBlocks = pendingFile.numBlocks();
-BlockInfoContiguous[] blocks = pendingFile.getBlocks();
+BlockInfo[] blocks = pendingFile.getBlocks();

 int nrCompleteBlocks;
-BlockInfoContiguous curBlock = null;
+BlockInfo curBlock = null;
 for(nrCompleteBlocks = 0; nrCompleteBlocks < nrBlocks; nrCompleteBlocks++) {
 curBlock = blocks[nrCompleteBlocks];
 if(!curBlock.isComplete())
@@ -3504,9 +3504,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,

 // The last block is not COMPLETE, and
 // that the penultimate block if exists is either COMPLETE or COMMITTED
-final BlockInfoContiguous lastBlock = pendingFile.getLastBlock();
+final BlockInfo lastBlock = pendingFile.getLastBlock();
 BlockUCState lastBlockState = lastBlock.getBlockUCState();
-BlockInfoContiguous penultimateBlock = pendingFile.getPenultimateBlock();
+BlockInfo penultimateBlock = pendingFile.getPenultimateBlock();

 // If penultimate block doesn't exist then its minReplication is met
 boolean penultimateBlockMinReplication = penultimateBlock == null ? true :
@@ -3650,7 +3650,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 }

 @VisibleForTesting
-BlockInfoContiguous getStoredBlock(Block block) {
+BlockInfo getStoredBlock(Block block) {
 return blockManager.getStoredBlock(block);
 }

@@ -3710,7 +3710,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,

 checkNameNodeSafeMode(
 "Cannot commitBlockSynchronization while in safe mode");
-final BlockInfoContiguous storedBlock = getStoredBlock(
+final BlockInfo storedBlock = getStoredBlock(
 ExtendedBlock.getLocalBlock(oldBlock));
 if (storedBlock == null) {
 if (deleteblock) {
@@ -3871,7 +3871,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 * @throws IOException on error
 */
 @VisibleForTesting
-String closeFileCommitBlocks(INodeFile pendingFile, BlockInfoContiguous storedBlock)
+String closeFileCommitBlocks(INodeFile pendingFile, BlockInfo storedBlock)
 throws IOException {
 final INodesInPath iip = INodesInPath.fromINode(pendingFile);
 final String src = iip.getPath();
@@ -4162,7 +4162,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,

 while (it.hasNext()) {
 Block b = it.next();
-BlockInfoContiguous blockInfo = blockManager.getStoredBlock(b);
+BlockInfo blockInfo = blockManager.getStoredBlock(b);
 if (blockInfo.getBlockCollection().getStoragePolicyID()
 == lpPolicy.getId()) {
 filesToDelete.add(blockInfo.getBlockCollection());
@@ -5105,7 +5105,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 SafeModeInfo safeMode = this.safeMode;
 if (safeMode == null) // mostly true
 return;
-BlockInfoContiguous storedBlock = getStoredBlock(b);
+BlockInfo storedBlock = getStoredBlock(b);
 if (storedBlock.isComplete()) {
 safeMode.decrementSafeBlockCount((short)blockManager.countNodes(b).liveReplicas());
 }
@@ -5665,7 +5665,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 + "access token for block " + block);

 // check stored block state
-BlockInfoContiguous storedBlock = getStoredBlock(ExtendedBlock.getLocalBlock(block));
+BlockInfo storedBlock = getStoredBlock(ExtendedBlock.getLocalBlock(block));
 if (storedBlock == null ||
 storedBlock.getBlockUCState() != BlockUCState.UNDER_CONSTRUCTION) {
 throw new IOException(block +
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.IOException;

 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;

@@ -58,7 +58,7 @@ public class FileUnderConstructionFeature implements INode.Feature {
 */
 void updateLengthOfLastBlock(INodeFile f, long lastBlockLength)
 throws IOException {
-BlockInfoContiguous lastBlock = f.getLastBlock();
+BlockInfo lastBlock = f.getLastBlock();
 assert (lastBlock != null) : "The last block for path "
 + f.getFullPathName() + " is null when updating its length";
 assert (lastBlock instanceof BlockInfoContiguousUnderConstruction)

@@ -74,7 +74,7 @@ public class FileUnderConstructionFeature implements INode.Feature {
 */
 void cleanZeroSizeBlock(final INodeFile f,
 final BlocksMapUpdateInfo collectedBlocks) {
-final BlockInfoContiguous[] blocks = f.getBlocks();
+final BlockInfo[] blocks = f.getBlocks();
 if (blocks != null && blocks.length > 0
 && blocks[blocks.length - 1] instanceof BlockInfoContiguousUnderConstruction) {
 BlockInfoContiguousUnderConstruction lastUC =
@@ -36,7 +36,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;

@@ -121,17 +121,17 @@ public class INodeFile extends INodeWithAdditionalFields

 private long header = 0L;

-private BlockInfoContiguous[] blocks;
+private BlockInfo[] blocks;

 INodeFile(long id, byte[] name, PermissionStatus permissions, long mtime,
-long atime, BlockInfoContiguous[] blklist, short replication,
+long atime, BlockInfo[] blklist, short replication,
 long preferredBlockSize) {
 this(id, name, permissions, mtime, atime, blklist, replication,
 preferredBlockSize, (byte) 0);
 }

 INodeFile(long id, byte[] name, PermissionStatus permissions, long mtime,
-long atime, BlockInfoContiguous[] blklist, short replication,
+long atime, BlockInfo[] blklist, short replication,
 long preferredBlockSize, byte storagePolicyID) {
 super(id, name, permissions, mtime, atime);
 header = HeaderFormat.toLong(preferredBlockSize, replication, storagePolicyID);

@@ -226,13 +226,13 @@ public class INodeFile extends INodeWithAdditionalFields
 }

 @Override // BlockCollection
-public void setBlock(int index, BlockInfoContiguous blk) {
+public void setBlock(int index, BlockInfo blk) {
 this.blocks[index] = blk;
 }

 @Override // BlockCollection, the file should be under construction
 public BlockInfoContiguousUnderConstruction setLastBlock(
-BlockInfoContiguous lastBlock, DatanodeStorageInfo[] locations)
+BlockInfo lastBlock, DatanodeStorageInfo[] locations)
 throws IOException {
 Preconditions.checkState(isUnderConstruction(),
 "file is no longer under construction");

@@ -265,7 +265,7 @@ public class INodeFile extends INodeWithAdditionalFields
 BlockInfoContiguousUnderConstruction uc =
 (BlockInfoContiguousUnderConstruction)blocks[size_1];
 //copy to a new list
-BlockInfoContiguous[] newlist = new BlockInfoContiguous[size_1];
+BlockInfo[] newlist = new BlockInfo[size_1];
 System.arraycopy(blocks, 0, newlist, 0, size_1);
 setBlocks(newlist);
 return uc;

@@ -420,16 +420,16 @@ public class INodeFile extends INodeWithAdditionalFields

 /** @return the blocks of the file. */
 @Override
-public BlockInfoContiguous[] getBlocks() {
+public BlockInfo[] getBlocks() {
 return this.blocks;
 }

 /** @return blocks of the file corresponding to the snapshot. */
-public BlockInfoContiguous[] getBlocks(int snapshot) {
+public BlockInfo[] getBlocks(int snapshot) {
 if(snapshot == CURRENT_STATE_ID || getDiffs() == null)
 return getBlocks();
 FileDiff diff = getDiffs().getDiffById(snapshot);
-BlockInfoContiguous[] snapshotBlocks =
+BlockInfo[] snapshotBlocks =
 diff == null ? getBlocks() : diff.getBlocks();
 if(snapshotBlocks != null)
 return snapshotBlocks;

@@ -441,7 +441,7 @@ public class INodeFile extends INodeWithAdditionalFields

 void updateBlockCollection() {
 if (blocks != null) {
-for(BlockInfoContiguous b : blocks) {
+for(BlockInfo b : blocks) {
 b.setBlockCollection(this);
 }
 }

@@ -457,8 +457,8 @@ public class INodeFile extends INodeWithAdditionalFields
 totalAddedBlocks += f.blocks.length;
 }

-BlockInfoContiguous[] newlist =
-new BlockInfoContiguous[size + totalAddedBlocks];
+BlockInfo[] newlist =
+new BlockInfo[size + totalAddedBlocks];
 System.arraycopy(this.blocks, 0, newlist, 0, size);

 for(INodeFile in: inodes) {

@@ -473,12 +473,12 @@ public class INodeFile extends INodeWithAdditionalFields
 /**
 * add a block to the block list
 */
-void addBlock(BlockInfoContiguous newblock) {
+void addBlock(BlockInfo newblock) {
 if (this.blocks == null) {
-this.setBlocks(new BlockInfoContiguous[]{newblock});
+this.setBlocks(new BlockInfo[]{newblock});
 } else {
 int size = this.blocks.length;
-BlockInfoContiguous[] newlist = new BlockInfoContiguous[size + 1];
+BlockInfo[] newlist = new BlockInfo[size + 1];
 System.arraycopy(this.blocks, 0, newlist, 0, size);
 newlist[size] = newblock;
 this.setBlocks(newlist);

@@ -486,7 +486,7 @@ public class INodeFile extends INodeWithAdditionalFields
 }

 /** Set the blocks. */
-public void setBlocks(BlockInfoContiguous[] blocks) {
+public void setBlocks(BlockInfo[] blocks) {
 this.blocks = blocks;
 }

@@ -538,7 +538,7 @@ public class INodeFile extends INodeWithAdditionalFields

 public void clearFile(ReclaimContext reclaimContext) {
 if (blocks != null && reclaimContext.collectedBlocks != null) {
-for (BlockInfoContiguous blk : blocks) {
+for (BlockInfo blk : blocks) {
 reclaimContext.collectedBlocks.addDeleteBlock(blk);
 blk.setBlockCollection(null);
 }

@@ -710,16 +710,16 @@ public class INodeFile extends INodeWithAdditionalFields
 */
 public final QuotaCounts storagespaceConsumed(BlockStoragePolicy bsp) {
 QuotaCounts counts = new QuotaCounts.Builder().build();
-final Iterable<BlockInfoContiguous> blocks;
+final Iterable<BlockInfo> blocks;
 FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
 if (sf == null) {
 blocks = Arrays.asList(getBlocks());
 } else {
 // Collect all distinct blocks
-Set<BlockInfoContiguous> allBlocks = new HashSet<>(Arrays.asList(getBlocks()));
+Set<BlockInfo> allBlocks = new HashSet<>(Arrays.asList(getBlocks()));
 List<FileDiff> diffs = sf.getDiffs().asList();
 for(FileDiff diff : diffs) {
-BlockInfoContiguous[] diffBlocks = diff.getBlocks();
+BlockInfo[] diffBlocks = diff.getBlocks();
 if (diffBlocks != null) {
 allBlocks.addAll(Arrays.asList(diffBlocks));
 }

@@ -728,7 +728,7 @@ public class INodeFile extends INodeWithAdditionalFields
 }

 final short replication = getPreferredBlockReplication();
-for (BlockInfoContiguous b : blocks) {
+for (BlockInfo b : blocks) {
 long blockSize = b.isComplete() ? b.getNumBytes() :
 getPreferredBlockSize();
 counts.addStorageSpace(blockSize * replication);

@@ -747,7 +747,7 @@ public class INodeFile extends INodeWithAdditionalFields
 /**
 * Return the penultimate allocated block for this file.
 */
-BlockInfoContiguous getPenultimateBlock() {
+BlockInfo getPenultimateBlock() {
 if (blocks == null || blocks.length <= 1) {
 return null;
 }

@@ -755,7 +755,7 @@ public class INodeFile extends INodeWithAdditionalFields
 }

 @Override
-public BlockInfoContiguous getLastBlock() {
+public BlockInfo getLastBlock() {
 return blocks == null || blocks.length == 0? null: blocks[blocks.length-1];
 }

@@ -782,7 +782,7 @@ public class INodeFile extends INodeWithAdditionalFields
 */
 public long collectBlocksBeyondMax(final long max,
 final BlocksMapUpdateInfo collectedBlocks) {
-final BlockInfoContiguous[] oldBlocks = getBlocks();
+final BlockInfo[] oldBlocks = getBlocks();
 if (oldBlocks == null)
 return 0;
 // find the minimum n such that the size of the first n blocks > max

@@ -814,17 +814,17 @@ public class INodeFile extends INodeWithAdditionalFields
 void computeQuotaDeltaForTruncate(
 long newLength, BlockStoragePolicy bsps,
 QuotaCounts delta) {
-final BlockInfoContiguous[] blocks = getBlocks();
+final BlockInfo[] blocks = getBlocks();
 if (blocks == null || blocks.length == 0) {
 return;
 }

 long size = 0;
-for (BlockInfoContiguous b : blocks) {
+for (BlockInfo b : blocks) {
 size += b.getNumBytes();
 }

-BlockInfoContiguous[] sblocks = null;
+BlockInfo[] sblocks = null;
 FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
 if (sf != null) {
 FileDiff diff = sf.getDiffs().getLast();

@@ -833,7 +833,7 @@ public class INodeFile extends INodeWithAdditionalFields

 for (int i = blocks.length - 1; i >= 0 && size > newLength;
 size -= blocks[i].getNumBytes(), --i) {
-BlockInfoContiguous bi = blocks[i];
+BlockInfo bi = blocks[i];
 long truncatedBytes;
 if (size - newLength < bi.getNumBytes()) {
 // Record a full block as the last block will be copied during

@@ -863,20 +863,20 @@ public class INodeFile extends INodeWithAdditionalFields
 }

 void truncateBlocksTo(int n) {
-final BlockInfoContiguous[] newBlocks;
+final BlockInfo[] newBlocks;
 if (n == 0) {
-newBlocks = BlockInfoContiguous.EMPTY_ARRAY;
+newBlocks = BlockInfo.EMPTY_ARRAY;
 } else {
-newBlocks = new BlockInfoContiguous[n];
+newBlocks = new BlockInfo[n];
 System.arraycopy(getBlocks(), 0, newBlocks, 0, n);
 }
 // set new blocks
 setBlocks(newBlocks);
 }

-public void collectBlocksBeyondSnapshot(BlockInfoContiguous[] snapshotBlocks,
+public void collectBlocksBeyondSnapshot(BlockInfo[] snapshotBlocks,
 BlocksMapUpdateInfo collectedBlocks) {
-BlockInfoContiguous[] oldBlocks = getBlocks();
+BlockInfo[] oldBlocks = getBlocks();
 if(snapshotBlocks == null || oldBlocks == null)
 return;
 // Skip blocks in common between the file and the snapshot

@@ -900,7 +900,7 @@ public class INodeFile extends INodeWithAdditionalFields
 FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
 if(sf == null)
 return;
-BlockInfoContiguous[] snapshotBlocks =
+BlockInfo[] snapshotBlocks =
 getDiffs().findEarlierSnapshotBlocks(snapshotId);
 if(snapshotBlocks == null)
 return;

@@ -914,12 +914,12 @@ public class INodeFile extends INodeWithAdditionalFields
 /**
 * @return true if the block is contained in a snapshot or false otherwise.
 */
-boolean isBlockInLatestSnapshot(BlockInfoContiguous block) {
+boolean isBlockInLatestSnapshot(BlockInfo block) {
 FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
 if (sf == null || sf.getDiffs() == null) {
 return false;
 }
-BlockInfoContiguous[] snapshotBlocks = getDiffs()
+BlockInfo[] snapshotBlocks = getDiffs()
 .findEarlierSnapshotBlocks(getDiffs().getLastSnapshotId());
 return snapshotBlocks != null &&
 Arrays.asList(snapshotBlocks).contains(block);
@@ -33,7 +33,7 @@ import java.util.TreeMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.util.Daemon;

@@ -108,11 +108,11 @@ public class LeaseManager {
 for (Long id : getINodeIdWithLeases()) {
 final INodeFile cons = fsnamesystem.getFSDirectory().getInode(id).asFile();
 Preconditions.checkState(cons.isUnderConstruction());
-BlockInfoContiguous[] blocks = cons.getBlocks();
+BlockInfo[] blocks = cons.getBlocks();
 if(blocks == null) {
 continue;
 }
-for(BlockInfoContiguous b : blocks) {
+for(BlockInfo b : blocks) {
 if(!b.isComplete())
 numUCBlocks++;
 }
@@ -64,7 +64,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactor
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus;

@@ -243,7 +243,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
 //get blockInfo
 Block block = new Block(Block.getBlockId(blockId));
 //find which file this block belongs to
-BlockInfoContiguous blockInfo = bm.getStoredBlock(block);
+BlockInfo blockInfo = bm.getStoredBlock(block);
 if(blockInfo == null) {
 out.println("Block "+ blockId +" " + NONEXISTENT_STATUS);
 LOG.warn("Block "+ blockId + " " + NONEXISTENT_STATUS);
@@ -42,7 +42,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.namenode.AclEntryStatusFormat;
 import org.apache.hadoop.hdfs.server.namenode.AclFeature;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;

@@ -239,13 +239,13 @@ public class FSImageFormatPBSnapshot {
 FileDiff diff = new FileDiff(pbf.getSnapshotId(), copy, null,
 pbf.getFileSize());
 List<BlockProto> bpl = pbf.getBlocksList();
-BlockInfoContiguous[] blocks = new BlockInfoContiguous[bpl.size()];
+BlockInfo[] blocks = new BlockInfo[bpl.size()];
 for(int j = 0, e = bpl.size(); j < e; ++j) {
 Block blk = PBHelper.convert(bpl.get(j));
-BlockInfoContiguous storedBlock = fsn.getBlockManager().getStoredBlock(blk);
+BlockInfo storedBlock = fsn.getBlockManager().getStoredBlock(blk);
 if(storedBlock == null) {
 storedBlock = fsn.getBlockManager().addBlockCollection(
-new BlockInfoContiguous(blk, copy.getFileReplication()), file);
+new BlockInfo(blk, copy.getFileReplication()), file);
 }
 blocks[j] = storedBlock;
 }
@@ -21,7 +21,7 @@ import java.io.DataOutput;
 import java.io.IOException;
 import java.util.Arrays;

-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;

@@ -38,7 +38,7 @@ public class FileDiff extends
 /** The file size at snapshot creation time. */
 private final long fileSize;
 /** A copy of the INodeFile block list. Used in truncate. */
-private BlockInfoContiguous[] blocks;
+private BlockInfo[] blocks;

 FileDiff(int snapshotId, INodeFile file) {
 super(snapshotId, null, null);

@@ -64,7 +64,7 @@ public class FileDiff extends
 * up to the current {@link #fileSize}.
 * Should be done only once.
 */
-public void setBlocks(BlockInfoContiguous[] blocks) {
+public void setBlocks(BlockInfo[] blocks) {
 if(this.blocks != null)
 return;
 int numBlocks = 0;

@@ -73,7 +73,7 @@ public class FileDiff extends
 this.blocks = Arrays.copyOf(blocks, numBlocks);
 }

-public BlockInfoContiguous[] getBlocks() {
+public BlockInfo[] getBlocks() {
 return blocks;
 }

@@ -118,7 +118,7 @@ public class FileDiff extends
 if (blocks == null || collectedBlocks == null) {
 return;
 }
-for (BlockInfoContiguous blk : blocks) {
+for (BlockInfo blk : blocks) {
 collectedBlocks.addDeleteBlock(blk);
 }
 blocks = null;
@@ -21,7 +21,7 @@ import java.util.Collections;
 import java.util.List;

 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.INode;

@@ -59,14 +59,14 @@ public class FileDiffList extends
 }
 }

-public BlockInfoContiguous[] findEarlierSnapshotBlocks(int snapshotId) {
+public BlockInfo[] findEarlierSnapshotBlocks(int snapshotId) {
 assert snapshotId != Snapshot.NO_SNAPSHOT_ID : "Wrong snapshot id";
 if (snapshotId == Snapshot.CURRENT_STATE_ID) {
 return null;
 }
 List<FileDiff> diffs = this.asList();
 int i = Collections.binarySearch(diffs, snapshotId);
-BlockInfoContiguous[] blocks = null;
+BlockInfo[] blocks = null;
 for(i = i >= 0 ? i : -i-2; i >= 0; i--) {
 blocks = diffs.get(i).getBlocks();
 if(blocks != null) {

@@ -76,14 +76,14 @@ public class FileDiffList extends
 return blocks;
 }

-public BlockInfoContiguous[] findLaterSnapshotBlocks(int snapshotId) {
+public BlockInfo[] findLaterSnapshotBlocks(int snapshotId) {
 assert snapshotId != Snapshot.NO_SNAPSHOT_ID : "Wrong snapshot id";
 if (snapshotId == Snapshot.CURRENT_STATE_ID) {
 return null;
 }
 List<FileDiff> diffs = this.asList();
 int i = Collections.binarySearch(diffs, snapshotId);
-BlockInfoContiguous[] blocks = null;
+BlockInfo[] blocks = null;
 for (i = i >= 0 ? i+1 : -i-1; i < diffs.size(); i++) {
 blocks = diffs.get(i).getBlocks();
 if (blocks != null) {

@@ -100,7 +100,7 @@ public class FileDiffList extends
 */
 void combineAndCollectSnapshotBlocks(
 INode.ReclaimContext reclaimContext, INodeFile file, FileDiff removed) {
-BlockInfoContiguous[] removedBlocks = removed.getBlocks();
+BlockInfo[] removedBlocks = removed.getBlocks();
 if (removedBlocks == null) {
 FileWithSnapshotFeature sf = file.getFileWithSnapshotFeature();
 assert sf != null : "FileWithSnapshotFeature is null";

@@ -114,10 +114,10 @@ public class FileDiffList extends
 if (earlierDiff != null) {
 earlierDiff.setBlocks(removedBlocks);
 }
-BlockInfoContiguous[] earlierBlocks =
-(earlierDiff == null ? new BlockInfoContiguous[]{} : earlierDiff.getBlocks());
+BlockInfo[] earlierBlocks =
+(earlierDiff == null ? new BlockInfo[]{} : earlierDiff.getBlocks());
 // Find later snapshot (or file itself) with blocks
-BlockInfoContiguous[] laterBlocks = findLaterSnapshotBlocks(removed.getSnapshotId());
+BlockInfo[] laterBlocks = findLaterSnapshotBlocks(removed.getSnapshotId());
 laterBlocks = (laterBlocks==null) ? file.getBlocks() : laterBlocks;
 // Skip blocks, which belong to either the earlier or the later lists
 int i = 0;

@@ -129,7 +129,7 @@ public class FileDiffList extends
 break;
 }
 // Check if last block is part of truncate recovery
-BlockInfoContiguous lastBlock = file.getLastBlock();
+BlockInfo lastBlock = file.getLastBlock();
 Block dontRemoveBlock = null;
 if (lastBlock != null && lastBlock.getBlockUCState().equals(
 HdfsServerConstants.BlockUCState.UNDER_RECOVERY)) {
@@ -22,7 +22,7 @@ import java.util.List;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.namenode.AclFeature;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.AclStorage;

@@ -208,7 +208,7 @@ public class FileWithSnapshotFeature implements INode.Feature {

 // Collect blocks that should be deleted
 FileDiff last = diffs.getLast();
-BlockInfoContiguous[] snapshotBlocks = last == null ? null : last.getBlocks();
+BlockInfo[] snapshotBlocks = last == null ? null : last.getBlocks();
 if(snapshotBlocks == null)
 file.collectBlocksBeyondMax(max, reclaimContext.collectedBlocks());
 else
@@ -108,7 +108,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;

@@ -1609,7 +1609,7 @@ public class DFSTestUtil {
 public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
 ExtendedBlock blk) {
 BlockManager bm0 = nn.getNamesystem().getBlockManager();
-BlockInfoContiguous storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
+BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
 assertTrue("Block " + blk + " should be under construction, " +
 "got: " + storedBlock,
 storedBlock instanceof BlockInfoContiguousUnderConstruction);
@@ -48,7 +48,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;

@@ -671,7 +671,7 @@ public class TestDecommission {
 GenericTestUtils.waitFor(new Supplier<Boolean>() {
 @Override
 public Boolean get() {
-BlockInfoContiguous info =
+BlockInfo info =
 blockManager.getStoredBlock(b.getLocalBlock());
 int count = 0;
 StringBuilder sb = new StringBuilder("Replica locations: ");
@@ -49,7 +49,7 @@ public class TestBlockInfo {

 @Test
 public void testIsDeleted() {
-BlockInfoContiguous blockInfo = new BlockInfoContiguous((short) 3);
+BlockInfo blockInfo = new BlockInfo((short) 3);
 BlockCollection bc = Mockito.mock(BlockCollection.class);
 blockInfo.setBlockCollection(bc);
 Assert.assertFalse(blockInfo.isDeleted());

@@ -59,7 +59,7 @@ public class TestBlockInfo {

 @Test
 public void testAddStorage() throws Exception {
-BlockInfoContiguous blockInfo = new BlockInfoContiguous((short) 3);
+BlockInfo blockInfo = new BlockInfo((short) 3);

 final DatanodeStorageInfo storage = DFSTestUtil.createDatanodeStorageInfo("storageID", "127.0.0.1");

@@ -71,9 +71,9 @@ public class TestBlockInfo {

 @Test
 public void testCopyConstructor() {
-BlockInfoContiguous old = new BlockInfoContiguous((short) 3);
+BlockInfo old = new BlockInfo((short) 3);
 try {
-BlockInfoContiguous copy = new BlockInfoContiguous(old);
+BlockInfo copy = new BlockInfo(old);
 assertEquals(old.getBlockCollection(), copy.getBlockCollection());
 assertEquals(old.getCapacity(), copy.getCapacity());
 } catch (Exception e) {

@@ -88,11 +88,11 @@ public class TestBlockInfo {
 final DatanodeStorageInfo storage1 = DFSTestUtil.createDatanodeStorageInfo("storageID1", "127.0.0.1");
 final DatanodeStorageInfo storage2 = new DatanodeStorageInfo(storage1.getDatanodeDescriptor(), new DatanodeStorage("storageID2"));
 final int NUM_BLOCKS = 10;
-BlockInfoContiguous[] blockInfos = new BlockInfoContiguous[NUM_BLOCKS];
+BlockInfo[] blockInfos = new BlockInfo[NUM_BLOCKS];

 // Create a few dummy blocks and add them to the first storage.
 for (int i = 0; i < NUM_BLOCKS; ++i) {
-blockInfos[i] = new BlockInfoContiguous((short) 3);
+blockInfos[i] = new BlockInfo((short) 3);
 storage1.addBlock(blockInfos[i]);
 }

@@ -111,14 +111,14 @@ public class TestBlockInfo {

 DatanodeStorageInfo dd = DFSTestUtil.createDatanodeStorageInfo("s1", "1.1.1.1");
 ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
-ArrayList<BlockInfoContiguous> blockInfoList = new ArrayList<BlockInfoContiguous>();
+ArrayList<BlockInfo> blockInfoList = new ArrayList<BlockInfo>();
 int headIndex;
 int curIndex;

 LOG.info("Building block list...");
 for (int i = 0; i < MAX_BLOCKS; i++) {
 blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
-blockInfoList.add(new BlockInfoContiguous(blockList.get(i), (short) 3));
+blockInfoList.add(new BlockInfo(blockList.get(i), (short) 3));
 dd.addBlock(blockInfoList.get(i));

 // index of the datanode should be 0

@@ -129,7 +129,7 @@ public class TestBlockInfo {
 // list length should be equal to the number of blocks we inserted
 LOG.info("Checking list length...");
 assertEquals("Length should be MAX_BLOCK", MAX_BLOCKS, dd.numBlocks());
-Iterator<BlockInfoContiguous> it = dd.getBlockIterator();
+Iterator<BlockInfo> it = dd.getBlockIterator();
 int len = 0;
 while (it.hasNext()) {
 it.next();

@@ -151,7 +151,7 @@ public class TestBlockInfo {
 // move head of the list to the head - this should not change the list
 LOG.info("Moving head to the head...");

-BlockInfoContiguous temp = dd.getBlockListHeadForTesting();
+BlockInfo temp = dd.getBlockListHeadForTesting();
 curIndex = 0;
 headIndex = 0;
 dd.moveBlockToHead(temp, curIndex, headIndex);
@@ -147,7 +147,7 @@ public class TestBlockManager {
 private void doBasicTest(int testIndex) {
 List<DatanodeStorageInfo> origStorages = getStorages(0, 1);
 List<DatanodeDescriptor> origNodes = getNodes(origStorages);
-BlockInfoContiguous blockInfo = addBlockOnNodes(testIndex, origNodes);
+BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);

 DatanodeStorageInfo[] pipeline = scheduleSingleReplication(blockInfo);
 assertEquals(2, pipeline.length);

@@ -179,7 +179,7 @@ public class TestBlockManager {
 // Block originally on A1, A2, B1
 List<DatanodeStorageInfo> origStorages = getStorages(0, 1, 3);
 List<DatanodeDescriptor> origNodes = getNodes(origStorages);
-BlockInfoContiguous blockInfo = addBlockOnNodes(testIndex, origNodes);
+BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);

 // Decommission two of the nodes (A1, A2)
 List<DatanodeDescriptor> decomNodes = startDecommission(0, 1);

@@ -223,7 +223,7 @@ public class TestBlockManager {
 // Block originally on A1, A2, B1
 List<DatanodeStorageInfo> origStorages = getStorages(0, 1, 3);
 List<DatanodeDescriptor> origNodes = getNodes(origStorages);
-BlockInfoContiguous blockInfo = addBlockOnNodes(testIndex, origNodes);
+BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);

 // Decommission all of the nodes
 List<DatanodeDescriptor> decomNodes = startDecommission(0, 1, 3);

@@ -276,7 +276,7 @@ public class TestBlockManager {
 // Block originally on A1, A2, B1
 List<DatanodeStorageInfo> origStorages = getStorages(0, 1, 3);
 List<DatanodeDescriptor> origNodes = getNodes(origStorages);
-BlockInfoContiguous blockInfo = addBlockOnNodes(testIndex, origNodes);
+BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);

 // Decommission all of the nodes in rack A
 List<DatanodeDescriptor> decomNodes = startDecommission(0, 1, 2);

@@ -335,7 +335,7 @@ public class TestBlockManager {
 private void doTestSufficientlyReplBlocksUsesNewRack(int testIndex) {
 // Originally on only nodes in rack A.
 List<DatanodeDescriptor> origNodes = rackA;
-BlockInfoContiguous blockInfo = addBlockOnNodes(testIndex, origNodes);
+BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);
 DatanodeStorageInfo pipeline[] = scheduleSingleReplication(blockInfo);

 assertEquals(2, pipeline.length); // single new copy

@@ -378,7 +378,7 @@ public class TestBlockManager {
 * Tell the block manager that replication is completed for the given
 * pipeline.
 */
-private void fulfillPipeline(BlockInfoContiguous blockInfo,
+private void fulfillPipeline(BlockInfo blockInfo,
 DatanodeStorageInfo[] pipeline) throws IOException {
 for (int i = 1; i < pipeline.length; i++) {
 DatanodeStorageInfo storage = pipeline[i];

@@ -387,9 +387,9 @@ public class TestBlockManager {
 }
 }

-private BlockInfoContiguous blockOnNodes(long blkId, List<DatanodeDescriptor> nodes) {
+private BlockInfo blockOnNodes(long blkId, List<DatanodeDescriptor> nodes) {
 Block block = new Block(blkId);
-BlockInfoContiguous blockInfo = new BlockInfoContiguous(block, (short) 3);
+BlockInfo blockInfo = new BlockInfo(block, (short) 3);

 for (DatanodeDescriptor dn : nodes) {
 for (DatanodeStorageInfo storage : dn.getStorageInfos()) {

@@ -431,10 +431,10 @@ public class TestBlockManager {
 return nodes;
 }

-private BlockInfoContiguous addBlockOnNodes(long blockId, List<DatanodeDescriptor> nodes) {
+private BlockInfo addBlockOnNodes(long blockId, List<DatanodeDescriptor> nodes) {
 BlockCollection bc = Mockito.mock(BlockCollection.class);
 Mockito.doReturn((short)3).when(bc).getPreferredBlockReplication();
-BlockInfoContiguous blockInfo = blockOnNodes(blockId, nodes);
+BlockInfo blockInfo = blockOnNodes(blockId, nodes);

 bm.blocksMap.addBlockCollection(blockInfo, bc);
 return blockInfo;

@@ -677,21 +677,21 @@ public class TestBlockManager {

 // blk_42 is finalized.
 long receivedBlockId = 42; // arbitrary
-BlockInfoContiguous receivedBlock = addBlockToBM(receivedBlockId);
+BlockInfo receivedBlock = addBlockToBM(receivedBlockId);
 rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivedBlock),
 ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
 builder.add(new FinalizedReplica(receivedBlock, null, null));

 // blk_43 is under construction.
 long receivingBlockId = 43;
-BlockInfoContiguous receivingBlock = addUcBlockToBM(receivingBlockId);
+BlockInfo receivingBlock = addUcBlockToBM(receivingBlockId);
 rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingBlock),
 ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, null));
 builder.add(new ReplicaBeingWritten(receivingBlock, null, null, null));

 // blk_44 has 2 records in IBR. It's finalized. So full BR has 1 record.
 long receivingReceivedBlockId = 44;
-BlockInfoContiguous receivingReceivedBlock = addBlockToBM(receivingReceivedBlockId);
+BlockInfo receivingReceivedBlock = addBlockToBM(receivingReceivedBlockId);
 rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingReceivedBlock),
 ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, null));
 rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingReceivedBlock),

@@ -709,7 +709,7 @@ public class TestBlockManager {

 // blk_46 exists in DN for a long time, so it's in full BR, but not in IBR.
 long existedBlockId = 46;
-BlockInfoContiguous existedBlock = addBlockToBM(existedBlockId);
+BlockInfo existedBlock = addBlockToBM(existedBlockId);
 builder.add(new FinalizedReplica(existedBlock, null, null));

 // process IBR and full BR

@@ -735,17 +735,17 @@ public class TestBlockManager {
 (ds) >= 0);
 }

-private BlockInfoContiguous addBlockToBM(long blkId) {
+private BlockInfo addBlockToBM(long blkId) {
 Block block = new Block(blkId);
-BlockInfoContiguous blockInfo =
-new BlockInfoContiguous(block, (short) 3);
+BlockInfo blockInfo =
+new BlockInfo(block, (short) 3);
 BlockCollection bc = Mockito.mock(BlockCollection.class);
 Mockito.doReturn((short) 3).when(bc).getPreferredBlockReplication();
 bm.blocksMap.addBlockCollection(blockInfo, bc);
 return blockInfo;
 }

-private BlockInfoContiguous addUcBlockToBM(long blkId) {
+private BlockInfo addUcBlockToBM(long blkId) {
 Block block = new Block(blkId);
 BlockInfoContiguousUnderConstruction blockInfo =
 new BlockInfoContiguousUnderConstruction(block, (short) 3);
@@ -58,8 +58,8 @@ public class TestDatanodeDescriptor {
public void testBlocksCounter() throws Exception {
DatanodeDescriptor dd = BlockManagerTestUtil.getLocalDatanodeDescriptor(true);
assertEquals(0, dd.numBlocks());
-BlockInfoContiguous blk = new BlockInfoContiguous(new Block(1L), (short) 1);
-BlockInfoContiguous blk1 = new BlockInfoContiguous(new Block(2L), (short) 2);
+BlockInfo blk = new BlockInfo(new Block(1L), (short) 1);
+BlockInfo blk1 = new BlockInfo(new Block(2L), (short) 2);
DatanodeStorageInfo[] storages = dd.getStorageInfos();
assertTrue(storages.length > 0);
// add first block
@@ -159,7 +159,7 @@ public class TestPendingReplication {
DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, TIMEOUT);
MiniDFSCluster cluster = null;
Block block;
-BlockInfoContiguous blockInfo;
+BlockInfo blockInfo;
try {
cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_COUNT).build();

@@ -178,7 +178,7 @@ public class TestPendingReplication {
//

block = new Block(1, 1, 0);
-blockInfo = new BlockInfoContiguous(block, (short) 3);
+blockInfo = new BlockInfo(block, (short) 3);

pendingReplications.increment(block,
DatanodeStorageInfo.toDatanodeDescriptors(
@@ -34,7 +34,6 @@ import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
-import java.util.Random;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;

@@ -1219,7 +1218,7 @@ public class TestReplicationPolicy {
chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);

-final BlockInfoContiguous info = new BlockInfoContiguous(block1, (short) 1);
+final BlockInfo info = new BlockInfo(block1, (short) 1);
final BlockCollection mbc = mock(BlockCollection.class);
when(mbc.getLastBlock()).thenReturn(info);
when(mbc.getPreferredBlockSize()).thenReturn(block1.getNumBytes() + 1);

@@ -1241,12 +1240,12 @@ public class TestReplicationPolicy {
when(dn.isDecommissioned()).thenReturn(true);
when(storage.getState()).thenReturn(DatanodeStorage.State.NORMAL);
when(storage.getDatanodeDescriptor()).thenReturn(dn);
-when(storage.removeBlock(any(BlockInfoContiguous.class))).thenReturn(true);
-when(storage.addBlock(any(BlockInfoContiguous.class))).thenReturn
+when(storage.removeBlock(any(BlockInfo.class))).thenReturn(true);
+when(storage.addBlock(any(BlockInfo.class))).thenReturn
(DatanodeStorageInfo.AddBlockResult.ADDED);
ucBlock.addStorage(storage);

-when(mbc.setLastBlock((BlockInfoContiguous) any(), (DatanodeStorageInfo[]) any()))
+when(mbc.setLastBlock((BlockInfo) any(), (DatanodeStorageInfo[]) any()))
.thenReturn(ucBlock);

bm.convertLastBlockToUnderConstruction(mbc, 0L);
@@ -23,7 +23,7 @@ import java.io.IOException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.Storage;

@@ -66,10 +66,10 @@ public class CreateEditsLog {
INodeDirectory dirInode = new INodeDirectory(inodeId.nextValue(), null, p,
0L);
editLog.logMkDir(BASE_PATH, dirInode);
-BlockInfoContiguous[] blocks = new BlockInfoContiguous[blocksPerFile];
+BlockInfo[] blocks = new BlockInfo[blocksPerFile];
for (int iB = 0; iB < blocksPerFile; ++iB) {
blocks[iB] =
-new BlockInfoContiguous(new Block(0, blockSize, BLOCK_GENERATION_STAMP),
+new BlockInfo(new Block(0, blockSize, BLOCK_GENERATION_STAMP),
replication);
}

@@ -97,7 +97,7 @@ public class CreateEditsLog {
editLog.logMkDir(currentDir, dirInode);
}
INodeFile fileUc = new INodeFile(inodeId.nextValue(), null,
-p, 0L, 0L, BlockInfoContiguous.EMPTY_ARRAY, replication, blockSize);
+p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize);
fileUc.toUnderConstruction("", "");
editLog.logOpenFile(filePath, fileUc, false, false);
editLog.logCloseFile(filePath, inode);
@@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.junit.After;
import org.junit.Before;

@@ -87,21 +87,21 @@ public class TestAddBlock {

// check file1
INodeFile file1Node = fsdir.getINode4Write(file1.toString()).asFile();
-BlockInfoContiguous[] file1Blocks = file1Node.getBlocks();
+BlockInfo[] file1Blocks = file1Node.getBlocks();
assertEquals(1, file1Blocks.length);
assertEquals(BLOCKSIZE - 1, file1Blocks[0].getNumBytes());
assertEquals(BlockUCState.COMPLETE, file1Blocks[0].getBlockUCState());

// check file2
INodeFile file2Node = fsdir.getINode4Write(file2.toString()).asFile();
-BlockInfoContiguous[] file2Blocks = file2Node.getBlocks();
+BlockInfo[] file2Blocks = file2Node.getBlocks();
assertEquals(1, file2Blocks.length);
assertEquals(BLOCKSIZE, file2Blocks[0].getNumBytes());
assertEquals(BlockUCState.COMPLETE, file2Blocks[0].getBlockUCState());

// check file3
INodeFile file3Node = fsdir.getINode4Write(file3.toString()).asFile();
-BlockInfoContiguous[] file3Blocks = file3Node.getBlocks();
+BlockInfo[] file3Blocks = file3Node.getBlocks();
assertEquals(2, file3Blocks.length);
assertEquals(BLOCKSIZE, file3Blocks[0].getNumBytes());
assertEquals(BlockUCState.COMPLETE, file3Blocks[0].getBlockUCState());
@@ -110,7 +110,7 @@ public class TestAddBlock {

// check file4
INodeFile file4Node = fsdir.getINode4Write(file4.toString()).asFile();
-BlockInfoContiguous[] file4Blocks = file4Node.getBlocks();
+BlockInfo[] file4Blocks = file4Node.getBlocks();
assertEquals(2, file4Blocks.length);
assertEquals(BLOCKSIZE, file4Blocks[0].getNumBytes());
assertEquals(BlockUCState.COMPLETE, file4Blocks[0].getBlockUCState());

@@ -141,7 +141,7 @@ public class TestAddBlock {
FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();

INodeFile fileNode = fsdir.getINode4Write(file1.toString()).asFile();
-BlockInfoContiguous[] fileBlocks = fileNode.getBlocks();
+BlockInfo[] fileBlocks = fileNode.getBlocks();
assertEquals(2, fileBlocks.length);
assertEquals(BLOCKSIZE, fileBlocks[0].getNumBytes());
assertEquals(BlockUCState.COMPLETE, fileBlocks[0].getBlockUCState());
@@ -35,7 +35,7 @@ import org.apache.hadoop.hdfs.TestFileCreation;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;

@@ -91,12 +91,12 @@ public class TestBlockUnderConstruction {
" isUnderConstruction = " + inode.isUnderConstruction() +
" expected to be " + isFileOpen,
inode.isUnderConstruction() == isFileOpen);
-BlockInfoContiguous[] blocks = inode.getBlocks();
+BlockInfo[] blocks = inode.getBlocks();
assertTrue("File does not have blocks: " + inode.toString(),
blocks != null && blocks.length > 0);

int idx = 0;
-BlockInfoContiguous curBlock;
+BlockInfo curBlock;
// all blocks but the last two should be regular blocks
for(; idx < blocks.length - 2; idx++) {
curBlock = blocks[idx];
@@ -22,7 +22,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

@@ -78,7 +78,7 @@ public class TestCommitBlockSynchronization {
doReturn(blockInfo).when(namesystemSpy).getStoredBlock(any(Block.class));
doReturn(blockInfo).when(file).getLastBlock();
doReturn("").when(namesystemSpy).closeFileCommitBlocks(
-any(INodeFile.class), any(BlockInfoContiguous.class));
+any(INodeFile.class), any(BlockInfo.class));
doReturn(mock(FSEditLog.class)).when(namesystemSpy).getEditLog();

return namesystemSpy;

@@ -106,7 +106,7 @@ public class TestCommitBlockSynchronization {
lastBlock, genStamp, length, false, false, newTargets, null);

// Simulate 'completing' the block.
-BlockInfoContiguous completedBlockInfo = new BlockInfoContiguous(block, (short) 1);
+BlockInfo completedBlockInfo = new BlockInfo(block, (short) 1);
completedBlockInfo.setBlockCollection(file);
completedBlockInfo.setGenerationStamp(genStamp);
doReturn(completedBlockInfo).when(namesystemSpy)

@@ -178,7 +178,7 @@ public class TestCommitBlockSynchronization {
namesystemSpy.commitBlockSynchronization(
lastBlock, genStamp, length, true, false, newTargets, null);

-BlockInfoContiguous completedBlockInfo = new BlockInfoContiguous(block, (short) 1);
+BlockInfo completedBlockInfo = new BlockInfo(block, (short) 1);
completedBlockInfo.setBlockCollection(file);
completedBlockInfo.setGenerationStamp(genStamp);
doReturn(completedBlockInfo).when(namesystemSpy)
@@ -69,7 +69,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;

@@ -205,7 +205,7 @@ public class TestEditLog {

for (int i = 0; i < numTransactions; i++) {
INodeFile inode = new INodeFile(namesystem.dir.allocateNewInodeId(), null,
-p, 0L, 0L, BlockInfoContiguous.EMPTY_ARRAY, replication, blockSize);
+p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize);
inode.toUnderConstruction("", "");

editLog.logOpenFile("/filename" + (startIndex + i), inode, false, false);
@@ -39,7 +39,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;

@@ -105,7 +105,7 @@ public class TestFSImage {
INodeFile file2Node = fsn.dir.getINode4Write(file2.toString()).asFile();
assertEquals("hello".length(), file2Node.computeFileSize());
assertTrue(file2Node.isUnderConstruction());
-BlockInfoContiguous[] blks = file2Node.getBlocks();
+BlockInfo[] blks = file2Node.getBlocks();
assertEquals(1, blks.length);
assertEquals(BlockUCState.UNDER_CONSTRUCTION, blks[0].getBlockUCState());
// check lease manager
@@ -79,7 +79,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;

@@ -885,7 +885,7 @@ public class TestFsck {
// intentionally corrupt NN data structure
INodeFile node = (INodeFile) cluster.getNamesystem().dir.getINode
(fileName, true);
-final BlockInfoContiguous[] blocks = node.getBlocks();
+final BlockInfo[] blocks = node.getBlocks();
assertEquals(blocks.length, 1);
blocks[0].setNumBytes(-1L); // set the block length to be negative
@@ -21,7 +21,7 @@ import org.apache.commons.io.Charsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

@@ -125,7 +125,7 @@ public class TestGetBlockLocations {
FsPermission.createImmutable((short) 0x1ff));
final INodeFile file = new INodeFile(
MOCK_INODE_ID, FILE_NAME.getBytes(Charsets.UTF_8),
-perm, 1, 1, new BlockInfoContiguous[] {}, (short) 1,
+perm, 1, 1, new BlockInfo[] {}, (short) 1,
DFS_BLOCK_SIZE_DEFAULT);
fsn.getFSDirectory().addINode(iip, file);
return fsn;
@@ -60,7 +60,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.IOUtils;

@@ -290,7 +290,7 @@ public class TestINodeFile {
iNodes[i] = new INodeFile(i, null, perm, 0L, 0L, null, replication,
preferredBlockSize);
iNodes[i].setLocalName(DFSUtil.string2Bytes(fileNamePrefix + i));
-BlockInfoContiguous newblock = new BlockInfoContiguous(replication);
+BlockInfo newblock = new BlockInfo(replication);
iNodes[i].addBlock(newblock);
}
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;

@@ -100,7 +100,7 @@ public class TestTruncateQuotaUpdate {
@Test
public void testTruncateWithSnapshotAndDivergence() {
INodeFile file = createMockFile(BLOCKSIZE * 2 + BLOCKSIZE / 2, REPLICATION);
-BlockInfoContiguous[] blocks = new BlockInfoContiguous
+BlockInfo[] blocks = new BlockInfo
[file.getBlocks().length];
System.arraycopy(file.getBlocks(), 0, blocks, 0, blocks.length);
addSnapshotFeature(file, blocks);
@@ -130,11 +130,11 @@ public class TestTruncateQuotaUpdate {
}

private INodeFile createMockFile(long size, short replication) {
-ArrayList<BlockInfoContiguous> blocks = new ArrayList<>();
+ArrayList<BlockInfo> blocks = new ArrayList<>();
long createdSize = 0;
while (createdSize < size) {
long blockSize = Math.min(BLOCKSIZE, size - createdSize);
-BlockInfoContiguous bi = newBlock(blockSize, replication);
+BlockInfo bi = newBlock(blockSize, replication);
blocks.add(bi);
createdSize += BLOCKSIZE;
}

@@ -142,16 +142,16 @@ public class TestTruncateQuotaUpdate {
.createImmutable((short) 0x1ff));
return new INodeFile(
++nextMockINodeId, new byte[0], perm, 0, 0,
-blocks.toArray(new BlockInfoContiguous[blocks.size()]), replication,
+blocks.toArray(new BlockInfo[blocks.size()]), replication,
BLOCKSIZE);
}

-private BlockInfoContiguous newBlock(long size, short replication) {
+private BlockInfo newBlock(long size, short replication) {
Block b = new Block(++nextMockBlockId, size, ++nextMockGenstamp);
-return new BlockInfoContiguous(b, replication);
+return new BlockInfo(b, replication);
}

-private static void addSnapshotFeature(INodeFile file, BlockInfoContiguous[] blocks) {
+private static void addSnapshotFeature(INodeFile file, BlockInfo[] blocks) {
FileDiff diff = mock(FileDiff.class);
when(diff.getBlocks()).thenReturn(blocks);
FileDiffList diffList = new FileDiffList();
@@ -43,7 +43,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage;

@@ -176,7 +176,7 @@ public class SnapshotTestHelper {
*
* Specific information for different types of INode:
* {@link INodeDirectory}:childrenSize
-* {@link INodeFile}: fileSize, block list. Check {@link BlockInfoContiguous#toString()}
+* {@link INodeFile}: fileSize, block list. Check {@link BlockInfo#toString()}
* and {@link BlockInfoContiguousUnderConstruction#toString()} for detailed information.
* {@link FileWithSnapshot}: next link
* </pre>
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;
import com.google.common.collect.Lists;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;

@@ -49,8 +49,8 @@ public class TestFileWithSnapshotFeature {
FileDiff diff = mock(FileDiff.class);
BlockStoragePolicySuite bsps = mock(BlockStoragePolicySuite.class);
BlockStoragePolicy bsp = mock(BlockStoragePolicy.class);
-BlockInfoContiguous[] blocks = new BlockInfoContiguous[] {
-new BlockInfoContiguous(new Block(1, BLOCK_SIZE, 1), REPL_1)
+BlockInfo[] blocks = new BlockInfo[] {
+new BlockInfo(new Block(1, BLOCK_SIZE, 1), REPL_1)
};

// No snapshot
@@ -36,7 +36,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

@@ -108,14 +108,14 @@ public class TestSnapshotBlocksMap {
final FSDirectory dir, final BlockManager blkManager) throws Exception {
final INodeFile file = INodeFile.valueOf(dir.getINode(path), path);
assertEquals(numBlocks, file.getBlocks().length);
-for(BlockInfoContiguous b : file.getBlocks()) {
+for(BlockInfo b : file.getBlocks()) {
assertBlockCollection(blkManager, file, b);
}
return file;
}

static void assertBlockCollection(final BlockManager blkManager,
-final INodeFile file, final BlockInfoContiguous b) {
+final INodeFile file, final BlockInfo b) {
Assert.assertSame(b, blkManager.getStoredBlock(b));
Assert.assertSame(file, blkManager.getBlockCollection(b));
Assert.assertSame(file, b.getBlockCollection());
@@ -146,10 +146,10 @@ public class TestSnapshotBlocksMap {
{
final INodeFile f2 = assertBlockCollection(file2.toString(), 3, fsdir,
blockmanager);
-BlockInfoContiguous[] blocks = f2.getBlocks();
+BlockInfo[] blocks = f2.getBlocks();
hdfs.delete(sub2, true);
// The INode should have been removed from the blocksMap
-for(BlockInfoContiguous b : blocks) {
+for(BlockInfo b : blocks) {
assertNull(blockmanager.getBlockCollection(b));
}
}

@@ -177,7 +177,7 @@ public class TestSnapshotBlocksMap {
// Check the block information for file0
final INodeFile f0 = assertBlockCollection(file0.toString(), 4, fsdir,
blockmanager);
-BlockInfoContiguous[] blocks0 = f0.getBlocks();
+BlockInfo[] blocks0 = f0.getBlocks();

// Also check the block information for snapshot of file0
Path snapshotFile0 = SnapshotTestHelper.getSnapshotPath(sub1, "s0",

@@ -187,7 +187,7 @@ public class TestSnapshotBlocksMap {
// Delete file0
hdfs.delete(file0, true);
// Make sure the blocks of file0 is still in blocksMap
-for(BlockInfoContiguous b : blocks0) {
+for(BlockInfo b : blocks0) {
assertNotNull(blockmanager.getBlockCollection(b));
}
assertBlockCollection(snapshotFile0.toString(), 4, fsdir, blockmanager);
@@ -201,7 +201,7 @@ public class TestSnapshotBlocksMap {
hdfs.deleteSnapshot(sub1, "s1");

// Make sure the first block of file0 is still in blocksMap
-for(BlockInfoContiguous b : blocks0) {
+for(BlockInfo b : blocks0) {
assertNotNull(blockmanager.getBlockCollection(b));
}
assertBlockCollection(snapshotFile0.toString(), 4, fsdir, blockmanager);

@@ -293,7 +293,7 @@ public class TestSnapshotBlocksMap {
hdfs.append(bar);

INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
-BlockInfoContiguous[] blks = barNode.getBlocks();
+BlockInfo[] blks = barNode.getBlocks();
assertEquals(1, blks.length);
assertEquals(BLOCKSIZE, blks[0].getNumBytes());
ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);

@@ -331,7 +331,7 @@ public class TestSnapshotBlocksMap {
hdfs.append(bar);

INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
-BlockInfoContiguous[] blks = barNode.getBlocks();
+BlockInfo[] blks = barNode.getBlocks();
assertEquals(1, blks.length);
ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
cluster.getNameNodeRpc()

@@ -370,7 +370,7 @@ public class TestSnapshotBlocksMap {
hdfs.append(bar);

INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
-BlockInfoContiguous[] blks = barNode.getBlocks();
+BlockInfo[] blks = barNode.getBlocks();
assertEquals(1, blks.length);
ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
cluster.getNameNodeRpc()

@@ -421,7 +421,7 @@ public class TestSnapshotBlocksMap {
out.write(testData);
out.close();
INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
-BlockInfoContiguous[] blks = barNode.getBlocks();
+BlockInfo[] blks = barNode.getBlocks();
assertEquals(1, blks.length);
assertEquals(testData.length, blks[0].getNumBytes());
@@ -42,7 +42,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

@@ -262,12 +262,12 @@ public class TestSnapshotDeletion {
DFSTestUtil.createFile(hdfs, tempFile, BLOCKSIZE, REPLICATION, seed);
final INodeFile temp = TestSnapshotBlocksMap.assertBlockCollection(
tempFile.toString(), 1, fsdir, blockmanager);
-BlockInfoContiguous[] blocks = temp.getBlocks();
+BlockInfo[] blocks = temp.getBlocks();
hdfs.delete(tempDir, true);
// check dir's quota usage
checkQuotaUsageComputation(dir, 8, BLOCKSIZE * REPLICATION * 3);
// check blocks of tempFile
-for (BlockInfoContiguous b : blocks) {
+for (BlockInfo b : blocks) {
assertNull(blockmanager.getBlockCollection(b));
}
@@ -344,7 +344,7 @@ public class TestSnapshotDeletion {
// while deletion, we add diff for subsub and metaChangeFile1, and remove
// newFile
checkQuotaUsageComputation(dir, 9L, BLOCKSIZE * REPLICATION * 4);
-for (BlockInfoContiguous b : blocks) {
+for (BlockInfo b : blocks) {
assertNull(blockmanager.getBlockCollection(b));
}

@@ -481,7 +481,7 @@ public class TestSnapshotDeletion {

final INodeFile toDeleteFileNode = TestSnapshotBlocksMap
.assertBlockCollection(toDeleteFile.toString(), 1, fsdir, blockmanager);
-BlockInfoContiguous[] blocks = toDeleteFileNode.getBlocks();
+BlockInfo[] blocks = toDeleteFileNode.getBlocks();

// create snapshot s0 on dir
SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");

@@ -507,7 +507,7 @@ public class TestSnapshotDeletion {
// metaChangeDir's diff, dir's diff. diskspace: remove toDeleteFile, and
// metaChangeFile's replication factor decreases
checkQuotaUsageComputation(dir, 6, 2 * BLOCKSIZE * REPLICATION - BLOCKSIZE);
-for (BlockInfoContiguous b : blocks) {
+for (BlockInfo b : blocks) {
assertNull(blockmanager.getBlockCollection(b));
}
@@ -801,7 +801,7 @@ public class TestSnapshotDeletion {
FileStatus statusBeforeDeletion13 = hdfs.getFileStatus(file13_s1);
INodeFile file14Node = TestSnapshotBlocksMap.assertBlockCollection(
file14_s2.toString(), 1, fsdir, blockmanager);
-BlockInfoContiguous[] blocks_14 = file14Node.getBlocks();
+BlockInfo[] blocks_14 = file14Node.getBlocks();
TestSnapshotBlocksMap.assertBlockCollection(file15_s2.toString(), 1, fsdir,
blockmanager);

@@ -838,7 +838,7 @@ public class TestSnapshotDeletion {
modDirStr + "file15");
assertFalse(hdfs.exists(file14_s1));
assertFalse(hdfs.exists(file15_s1));
-for (BlockInfoContiguous b : blocks_14) {
+for (BlockInfo b : blocks_14) {
assertNull(blockmanager.getBlockCollection(b));
}