HDFS-7749. Erasure Coding: Add striped block support in INodeFile. Contributed by Jing Zhao.

Jing Zhao 2015-02-25 22:10:26 -08:00 committed by Zhe Zhang
parent dae27f6dd1
commit 9f2f583f40
35 changed files with 967 additions and 291 deletions

View File

@@ -172,6 +172,7 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StripedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.InotifyProtos;
 import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto;
@@ -184,6 +185,7 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
@@ -428,6 +430,21 @@ public class PBHelper {
     return new Block(b.getBlockId(), b.getNumBytes(), b.getGenStamp());
   }
 
+  public static BlockInfoStriped convert(StripedBlockProto p) {
+    return new BlockInfoStriped(convert(p.getBlock()),
+        (short) p.getDataBlockNum(), (short) p.getParityBlockNum());
+  }
+
+  public static StripedBlockProto convert(BlockInfoStriped blk) {
+    BlockProto bp = BlockProto.newBuilder().setBlockId(blk.getBlockId())
+        .setGenStamp(blk.getGenerationStamp()).setNumBytes(blk.getNumBytes())
+        .build();
+    return StripedBlockProto.newBuilder()
+        .setDataBlockNum(blk.getDataBlockNum())
+        .setParityBlockNum(blk.getParityBlockNum())
+        .setBlock(bp).build();
+  }
+
   public static BlockWithLocationsProto convert(BlockWithLocations blk) {
     return BlockWithLocationsProto.newBuilder()
         .setBlock(convert(blk.getBlock()))
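As a quick illustration of the new converters above, here is a minimal round-trip sketch; the 6+3 data/parity split and the block values are made-up examples, not something this patch fixes:

  // Hypothetical round trip through the new PBHelper converters (illustration only).
  static void stripedProtoRoundTrip() {
    Block blk = new Block(1001L, 0L, 1000L);              // id, numBytes, genStamp
    BlockInfoStriped striped =
        new BlockInfoStriped(blk, (short) 6, (short) 3);  // 6 data + 3 parity blocks
    StripedBlockProto proto = PBHelper.convert(striped);  // NameNode object -> protobuf
    BlockInfoStriped back = PBHelper.convert(proto);      // protobuf -> NameNode object
    assert back.getDataBlockNum() == 6 && back.getParityBlockNum() == 3;
  }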

View File

@@ -31,7 +31,7 @@ public interface BlockCollection {
   /**
    * Get the last block of the collection.
    */
-  public BlockInfoContiguous getLastBlock();
+  public BlockInfo getLastBlock();
 
   /**
    * Get content summary.
@@ -44,9 +44,9 @@ public interface BlockCollection {
   public int numBlocks();
 
   /**
-   * Get the blocks or block groups.
+   * Get the blocks (striped or contiguous).
    */
-  public BlockInfoContiguous[] getBlocks();
+  public BlockInfo[] getBlocks();
 
   /**
    * Get preferred block size for the collection
@@ -71,16 +71,15 @@ public interface BlockCollection {
   public String getName();
 
   /**
-   * Set the block/block-group at the given index.
+   * Set the block (contiguous or striped) at the given index.
    */
-  public void setBlock(int index, BlockInfoContiguous blk);
+  public void setBlock(int index, BlockInfo blk);
 
   /**
    * Convert the last block of the collection to an under-construction block
    * and set the locations.
    */
-  public BlockInfoContiguousUnderConstruction setLastBlock(
-      BlockInfoContiguous lastBlock,
+  public void convertLastBlockToUC(BlockInfo lastBlock,
       DatanodeStorageInfo[] targets) throws IOException;
 
   /**

View File

@@ -21,6 +21,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.util.LightWeightGSet;
 
+import java.io.IOException;
 import java.util.LinkedList;
 
 /**
@@ -289,8 +290,9 @@ public abstract class BlockInfo extends Block
   /**
    * BlockInfo represents a block that is not being constructed.
-   * In order to start modifying the block, the BlockInfo should be converted
-   * to {@link BlockInfoContiguousUnderConstruction}.
+   * In order to start modifying the block, the BlockInfo should be converted to
+   * {@link BlockInfoContiguousUnderConstruction} or
+   * {@link BlockInfoStripedUnderConstruction}.
    * @return {@link HdfsServerConstants.BlockUCState#COMPLETE}
    */
   public HdfsServerConstants.BlockUCState getBlockUCState() {
@@ -340,4 +342,86 @@ public abstract class BlockInfo extends Block
       return new BlockInfoStriped((BlockInfoStriped) b);
     }
   }
static BlockInfo convertToCompleteBlock(BlockInfo blk) throws IOException {
if (blk instanceof BlockInfoContiguousUnderConstruction) {
return ((BlockInfoContiguousUnderConstruction) blk)
.convertToCompleteBlock();
} else if (blk instanceof BlockInfoStripedUnderConstruction) {
return ((BlockInfoStripedUnderConstruction) blk).convertToCompleteBlock();
} else {
return blk;
}
}
static void commitBlock(BlockInfo blockInfo, Block reported)
throws IOException {
if (blockInfo instanceof BlockInfoContiguousUnderConstruction) {
((BlockInfoContiguousUnderConstruction) blockInfo).commitBlock(reported);
} else if (blockInfo instanceof BlockInfoStripedUnderConstruction) {
((BlockInfoStripedUnderConstruction) blockInfo).commitBlock(reported);
}
}
static void addReplica(BlockInfo ucBlock, DatanodeStorageInfo storageInfo,
Block reportedBlock, HdfsServerConstants.ReplicaState reportedState) {
assert ucBlock instanceof BlockInfoContiguousUnderConstruction ||
ucBlock instanceof BlockInfoStripedUnderConstruction;
if (ucBlock instanceof BlockInfoContiguousUnderConstruction) {
((BlockInfoContiguousUnderConstruction) ucBlock).addReplicaIfNotPresent(
storageInfo, reportedBlock, reportedState);
} else { // StripedUC
((BlockInfoStripedUnderConstruction) ucBlock).addReplicaIfNotPresent(
storageInfo, reportedBlock, reportedState);
}
}
static int getNumExpectedLocations(BlockInfo ucBlock) {
assert ucBlock instanceof BlockInfoContiguousUnderConstruction ||
ucBlock instanceof BlockInfoStripedUnderConstruction;
if (ucBlock instanceof BlockInfoContiguousUnderConstruction) {
return ((BlockInfoContiguousUnderConstruction) ucBlock)
.getNumExpectedLocations();
} else { // StripedUC
return ((BlockInfoStripedUnderConstruction) ucBlock)
.getNumExpectedLocations();
}
}
public static DatanodeStorageInfo[] getExpectedStorageLocations(
BlockInfo ucBlock) {
assert ucBlock instanceof BlockInfoContiguousUnderConstruction ||
ucBlock instanceof BlockInfoStripedUnderConstruction;
if (ucBlock instanceof BlockInfoContiguousUnderConstruction) {
return ((BlockInfoContiguousUnderConstruction) ucBlock)
.getExpectedStorageLocations();
} else { // StripedUC
return ((BlockInfoStripedUnderConstruction) ucBlock)
.getExpectedStorageLocations();
}
}
public static void setExpectedLocations(BlockInfo ucBlock,
DatanodeStorageInfo[] targets) {
assert ucBlock instanceof BlockInfoContiguousUnderConstruction ||
ucBlock instanceof BlockInfoStripedUnderConstruction;
if (ucBlock instanceof BlockInfoContiguousUnderConstruction) {
((BlockInfoContiguousUnderConstruction) ucBlock)
.setExpectedLocations(targets);
} else { // StripedUC
((BlockInfoStripedUnderConstruction) ucBlock)
.setExpectedLocations(targets);
}
}
public static long getBlockRecoveryId(BlockInfo ucBlock) {
assert ucBlock instanceof BlockInfoContiguousUnderConstruction ||
ucBlock instanceof BlockInfoStripedUnderConstruction;
if (ucBlock instanceof BlockInfoContiguousUnderConstruction) {
return ((BlockInfoContiguousUnderConstruction) ucBlock)
.getBlockRecoveryId();
} else { // StripedUC
return ((BlockInfoStripedUnderConstruction) ucBlock).getBlockRecoveryId();
}
}
}
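The static helpers above give BlockManager-style code one entry point for both UC subtypes. A rough sketch of the intended calling pattern, assuming code living in the same blockmanagement package (names are illustrative, not from the patch):

  // ucBlock may be a BlockInfoContiguousUnderConstruction or a
  // BlockInfoStripedUnderConstruction; no downcast is needed at the call site.
  static BlockInfo finishUcBlockSketch(BlockInfo ucBlock, Block reported,
      DatanodeStorageInfo[] targets) throws IOException {
    BlockInfo.setExpectedLocations(ucBlock, targets);  // record pipeline / block group
    BlockInfo.commitBlock(ucBlock, reported);          // client-reported length and GS
    return BlockInfo.convertToCompleteBlock(ucBlock);  // COMMITTED -> COMPLETE copy
  }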

View File

@@ -74,7 +74,7 @@ public class BlockInfoContiguousUnderConstruction extends BlockInfoContiguous {
       BlockUCState state, DatanodeStorageInfo[] targets) {
     super(blk, replication);
     assert getBlockUCState() != BlockUCState.COMPLETE :
-      "BlockInfoUnderConstruction cannot be in COMPLETE state";
+      "BlockInfoContiguousUnderConstruction cannot be in COMPLETE state";
     this.blockUCState = state;
     setExpectedLocations(targets);
   }
@@ -82,7 +82,7 @@ public class BlockInfoContiguousUnderConstruction extends BlockInfoContiguous {
   /**
    * Convert an under construction block to a complete block.
    *
-   * @return BlockInfo - a complete block.
+   * @return BlockInfoContiguous - a complete block.
    * @throws IOException if the state of the block
    * (the generation stamp and the length) has not been committed by
    * the client or it does not have at least a minimal number of replicas
@@ -197,7 +197,7 @@ public class BlockInfoContiguousUnderConstruction extends BlockInfoContiguous {
     blockRecoveryId = recoveryId;
     if (replicas.size() == 0) {
       NameNode.blockStateChangeLog.warn("BLOCK*"
-          + " BlockInfoUnderConstruction.initLeaseRecovery:"
+          + " BlockInfoContiguousUnderConstruction.initLeaseRecovery:"
          + " No blocks found, lease removed.");
     }
     boolean allLiveReplicasTriedAsPrimary = true;

View File

@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 
 /**
  * Subclass of {@link BlockInfo}, presenting a block group in erasure coding.
@@ -59,6 +61,14 @@ public class BlockInfoStriped extends BlockInfo {
     return (short) (dataBlockNum + parityBlockNum);
   }
 
+  public short getDataBlockNum() {
+    return dataBlockNum;
+  }
+
+  public short getParityBlockNum() {
+    return parityBlockNum;
+  }
+
   private void initIndices() {
     for (int i = 0; i < indices.length; i++) {
       indices[i] = -1;
@@ -176,4 +186,25 @@ public class BlockInfoStriped extends BlockInfo {
     }
     return num;
   }
+
+  /**
+   * Convert a complete block to an under construction block.
+   * @return BlockInfoUnderConstruction - an under construction block.
+   */
+  public BlockInfoStripedUnderConstruction convertToBlockUnderConstruction(
+      BlockUCState s, DatanodeStorageInfo[] targets) {
+    final BlockInfoStripedUnderConstruction ucBlock;
+    if(isComplete()) {
+      ucBlock = new BlockInfoStripedUnderConstruction(this, getDataBlockNum(),
+          getParityBlockNum(), s, targets);
+      ucBlock.setBlockCollection(getBlockCollection());
+    } else {
+      // the block is already under construction
+      ucBlock = (BlockInfoStripedUnderConstruction) this;
+      ucBlock.setBlockUCState(s);
+      ucBlock.setExpectedLocations(targets);
+      ucBlock.setBlockCollection(getBlockCollection());
+    }
+    return ucBlock;
+  }
 }
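For orientation only, a sketch of how a complete striped block group could be switched back to an under-construction one with the new method; whether and when the NameNode actually does this for striped files is outside this patch:

  // Illustration: reopen the last (complete) block group of a file for writing.
  static BlockInfoStripedUnderConstruction reopenGroupSketch(
      BlockInfoStriped lastGroup, DatanodeStorageInfo[] targets) {
    // For a complete group this allocates a new UC object; for a group that is
    // already under construction it just resets the state and expected locations.
    return lastGroup.convertToBlockUnderConstruction(
        HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
  }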

View File

@@ -0,0 +1,240 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState.COMPLETE;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION;
/**
* Represents a striped block that is currently being constructed.
* This is usually the last block of a file opened for write or append.
*/
public class BlockInfoStripedUnderConstruction extends BlockInfoStriped {
private BlockUCState blockUCState;
/**
* Block replicas as assigned when the block was allocated.
*
* TODO: we need to update this attribute, along with the return type of
* getExpectedStorageLocations and LocatedBlock. For striped blocks, clients
* need to understand the index of each striped block in the block group.
*/
private List<ReplicaUnderConstruction> replicas;
/**
* The new generation stamp, which this block will have
* after the recovery succeeds. Also used as a recovery id to identify
* the right recovery if any of the abandoned recoveries re-appear.
*/
private long blockRecoveryId = 0;
/**
* Constructor with null storage targets.
*/
public BlockInfoStripedUnderConstruction(Block blk, short dataBlockNum,
short parityBlockNum) {
this(blk, dataBlockNum, parityBlockNum, UNDER_CONSTRUCTION, null);
}
/**
* Create a striped block that is currently being constructed.
*/
public BlockInfoStripedUnderConstruction(Block blk, short dataBlockNum,
short parityBlockNum, BlockUCState state, DatanodeStorageInfo[] targets) {
super(blk, dataBlockNum, parityBlockNum);
assert getBlockUCState() != COMPLETE :
"BlockInfoStripedUnderConstruction cannot be in COMPLETE state";
this.blockUCState = state;
setExpectedLocations(targets);
}
/**
* Convert an under construction striped block to a complete striped block.
*
* @return BlockInfoStriped - a complete block.
* @throws IOException if the state of the block
* (the generation stamp and the length) has not been committed by
* the client or it does not have at least a minimal number of replicas
* reported from data-nodes.
*/
BlockInfoStriped convertToCompleteBlock() throws IOException {
assert getBlockUCState() != COMPLETE :
"Trying to convert a COMPLETE block";
return new BlockInfoStriped(this);
}
/** Set expected locations */
public void setExpectedLocations(DatanodeStorageInfo[] targets) {
int numLocations = targets == null ? 0 : targets.length;
this.replicas = new ArrayList<>(numLocations);
for(int i = 0; i < numLocations; i++) {
replicas.add(new ReplicaUnderConstruction(this, targets[i],
ReplicaState.RBW));
}
}
/**
* Create array of expected replica locations
* (as has been assigned by chooseTargets()).
*/
public DatanodeStorageInfo[] getExpectedStorageLocations() {
int numLocations = getNumExpectedLocations();
DatanodeStorageInfo[] storages = new DatanodeStorageInfo[numLocations];
for (int i = 0; i < numLocations; i++) {
storages[i] = replicas.get(i).getExpectedStorageLocation();
}
return storages;
}
/** Get the number of expected locations */
public int getNumExpectedLocations() {
return replicas == null ? 0 : replicas.size();
}
/**
* Return the state of the block under construction.
* @see BlockUCState
*/
@Override // BlockInfo
public BlockUCState getBlockUCState() {
return blockUCState;
}
void setBlockUCState(BlockUCState s) {
blockUCState = s;
}
/** Get block recovery ID */
public long getBlockRecoveryId() {
return blockRecoveryId;
}
/**
* Process the recorded replicas. When about to commit or finish the
* pipeline recovery sort out bad replicas.
* @param genStamp The final generation stamp for the block.
*/
public void setGenerationStampAndVerifyReplicas(long genStamp) {
// Set the generation stamp for the block.
setGenerationStamp(genStamp);
if (replicas == null)
return;
// Remove the replicas with wrong gen stamp.
// The replica list is unchanged.
for (ReplicaUnderConstruction r : replicas) {
if (genStamp != r.getGenerationStamp()) {
r.getExpectedStorageLocation().removeBlock(this);
NameNode.blockStateChangeLog.info("BLOCK* Removing stale replica "
+ "from location: {}", r.getExpectedStorageLocation());
}
}
}
/**
* Commit block's length and generation stamp as reported by the client.
* Set block state to {@link BlockUCState#COMMITTED}.
* @param block - contains client reported block length and generation
*/
void commitBlock(Block block) throws IOException {
if (getBlockId() != block.getBlockId()) {
throw new IOException("Trying to commit inconsistent block: id = "
+ block.getBlockId() + ", expected id = " + getBlockId());
}
blockUCState = BlockUCState.COMMITTED;
this.set(getBlockId(), block.getNumBytes(), block.getGenerationStamp());
// Sort out invalid replicas.
setGenerationStampAndVerifyReplicas(block.getGenerationStamp());
}
/**
* Initialize lease recovery for this striped block.
*/
public void initializeBlockRecovery(long recoveryId) {
setBlockUCState(BlockUCState.UNDER_RECOVERY);
blockRecoveryId = recoveryId;
if (replicas == null || replicas.size() == 0) {
NameNode.blockStateChangeLog.warn("BLOCK*" +
" BlockInfoUnderConstruction.initLeaseRecovery:" +
" No blocks found, lease removed.");
}
// TODO we need to implement different recovery logic here
}
void addReplicaIfNotPresent(DatanodeStorageInfo storage, Block block,
ReplicaState rState) {
Iterator<ReplicaUnderConstruction> it = replicas.iterator();
while (it.hasNext()) {
ReplicaUnderConstruction r = it.next();
DatanodeStorageInfo expectedLocation = r.getExpectedStorageLocation();
if (expectedLocation == storage) {
// Record the gen stamp from the report
r.setGenerationStamp(block.getGenerationStamp());
return;
} else if (expectedLocation != null &&
expectedLocation.getDatanodeDescriptor() ==
storage.getDatanodeDescriptor()) {
// The Datanode reported that the block is on a different storage
// than the one chosen by BlockPlacementPolicy. This can occur as
// we allow Datanodes to choose the target storage. Update our
// state by removing the stale entry and adding a new one.
it.remove();
break;
}
}
replicas.add(new ReplicaUnderConstruction(block, storage, rState));
}
@Override
public String toString() {
final StringBuilder b = new StringBuilder(100);
appendStringTo(b);
return b.toString();
}
@Override
public void appendStringTo(StringBuilder sb) {
super.appendStringTo(sb);
appendUCParts(sb);
}
private void appendUCParts(StringBuilder sb) {
sb.append("{UCState=").append(blockUCState).append(", replicas=[");
if (replicas != null) {
Iterator<ReplicaUnderConstruction> iter = replicas.iterator();
if (iter.hasNext()) {
iter.next().appendStringTo(sb);
while (iter.hasNext()) {
sb.append(", ");
iter.next().appendStringTo(sb);
}
}
}
sb.append("]}");
}
}
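Taken together, the new class gives a striped block group the same UNDER_CONSTRUCTION / COMMITTED / COMPLETE lifecycle as a contiguous block. A hedged sketch of that lifecycle, assuming code in the blockmanagement package and an illustrative 6+3 layout:

  // Illustration of the intended state transitions, not code from this patch.
  static void stripedLifecycleSketch(Block allocated, DatanodeStorageInfo[] chosen,
      Block clientReport) throws IOException {
    BlockInfoStripedUnderConstruction uc =
        new BlockInfoStripedUnderConstruction(allocated, (short) 6, (short) 3);
    uc.setExpectedLocations(chosen);      // one replica entry per internal block
    uc.commitBlock(clientReport);         // -> COMMITTED, verifies the gen stamp
    BlockInfoStriped complete = uc.convertToCompleteBlock();  // -> COMPLETE copy
  }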

View File

@@ -543,8 +543,8 @@ public class BlockManager {
     int usableReplicas = numReplicas.liveReplicas() +
                          numReplicas.decommissionedAndDecommissioning();
 
-    if (block instanceof BlockInfoContiguous) {
-      BlockCollection bc = ((BlockInfoContiguous) block).getBlockCollection();
+    if (block instanceof BlockInfo) {
+      BlockCollection bc = ((BlockInfo) block).getBlockCollection();
       String fileName = (bc == null) ? "[orphaned]" : bc.getName();
       out.print(fileName + ": ");
     }
@@ -598,15 +598,14 @@ public class BlockManager {
    * @throws IOException if the block does not have at least a minimal number
    * of replicas reported from data-nodes.
    */
-  private static boolean commitBlock(
-      final BlockInfoContiguousUnderConstruction block,
+  private static boolean commitBlock(final BlockInfo block,
       final Block commitBlock) throws IOException {
     if (block.getBlockUCState() == BlockUCState.COMMITTED)
       return false;
     assert block.getNumBytes() <= commitBlock.getNumBytes() :
       "commitBlock length is less than the stored one "
       + commitBlock.getNumBytes() + " vs. " + block.getNumBytes();
-    block.commitBlock(commitBlock);
+    BlockInfo.commitBlock(block, commitBlock);
     return true;
   }
@@ -624,16 +623,16 @@ public class BlockManager {
       Block commitBlock) throws IOException {
     if(commitBlock == null)
       return false; // not committing, this is a block allocation retry
-    BlockInfoContiguous lastBlock = bc.getLastBlock();
+    BlockInfo lastBlock = bc.getLastBlock();
     if(lastBlock == null)
       return false; // no blocks in file yet
     if(lastBlock.isComplete())
       return false; // already completed (e.g. by syncBlock)
-    final boolean b = commitBlock(
-        (BlockInfoContiguousUnderConstruction)lastBlock, commitBlock);
-    if(countNodes(lastBlock).liveReplicas() >= minReplication)
-      completeBlock(bc, bc.numBlocks()-1, false);
+
+    final boolean b = commitBlock(lastBlock, commitBlock);
+    if (countNodes(lastBlock).liveReplicas() >= minReplication) {
+      completeBlock(bc, bc.numBlocks() - 1, false);
+    }
     return b;
   }
@@ -646,22 +645,25 @@ public class BlockManager {
    */
   private BlockInfo completeBlock(final BlockCollection bc,
       final int blkIndex, boolean force) throws IOException {
-    if(blkIndex < 0)
-      return null;
-    BlockInfoContiguous curBlock = bc.getBlocks()[blkIndex];
-    if (curBlock.isComplete())
-      return curBlock;
-    // TODO: support BlockInfoStripedUC
-    BlockInfoContiguousUnderConstruction ucBlock =
-        (BlockInfoContiguousUnderConstruction)curBlock;
-    int numNodes = ucBlock.numNodes();
-    if (!force && numNodes < minReplication)
-      throw new IOException("Cannot complete block: " +
-          "block does not satisfy minimal replication requirement.");
-    if(!force && ucBlock.getBlockUCState() != BlockUCState.COMMITTED)
-      throw new IOException(
-          "Cannot complete block: block has not been COMMITTED by the client");
-    BlockInfoContiguous completeBlock = ucBlock.convertToCompleteBlock();
+    if (blkIndex < 0) {
+      return null;
+    }
+    BlockInfo curBlock = bc.getBlocks()[blkIndex];
+    if (curBlock.isComplete()) {
+      return curBlock;
+    }
+
+    int numNodes = curBlock.numNodes();
+    if (!force && numNodes < minReplication) {
+      throw new IOException("Cannot complete block: " +
+          "block does not satisfy minimal replication requirement.");
+    }
+    if (!force && curBlock.getBlockUCState() != BlockUCState.COMMITTED) {
+      throw new IOException(
+          "Cannot complete block: block has not been COMMITTED by the client");
+    }
+
+    final BlockInfo completeBlock = BlockInfo.convertToCompleteBlock(curBlock);
+
     // replace penultimate block in file
     bc.setBlock(blkIndex, completeBlock);
@@ -679,10 +681,9 @@ public class BlockManager {
     return blocksMap.replaceBlock(completeBlock);
   }
 
-  // TODO: support BlockInfoStrippedUC
   private BlockInfo completeBlock(final BlockCollection bc,
       final BlockInfo block, boolean force) throws IOException {
-    BlockInfoContiguous[] fileBlocks = bc.getBlocks();
+    BlockInfo[] fileBlocks = bc.getBlocks();
     for (int idx = 0; idx < fileBlocks.length; idx++) {
       if (fileBlocks[idx] == block) {
         return completeBlock(bc, idx, force);
@@ -698,6 +699,7 @@ public class BlockManager {
    */
   public BlockInfo forceCompleteBlock(final BlockCollection bc,
       final BlockInfoContiguousUnderConstruction block) throws IOException {
+    // TODO: support BlockInfoStripedUC for editlog
     block.commitBlock(block);
     return completeBlock(bc, block, true);
   }
@@ -719,7 +721,7 @@ public class BlockManager {
    */
   public LocatedBlock convertLastBlockToUnderConstruction(
       BlockCollection bc, long bytesToRemove) throws IOException {
-    BlockInfoContiguous oldBlock = bc.getLastBlock();
+    BlockInfo oldBlock = bc.getLastBlock();
     if(oldBlock == null ||
        bc.getPreferredBlockSize() == oldBlock.getNumBytes() - bytesToRemove)
       return null;
@@ -728,8 +730,10 @@ public class BlockManager {
     DatanodeStorageInfo[] targets = getStorages(oldBlock);
 
-    BlockInfoContiguousUnderConstruction ucBlock = bc.setLastBlock(oldBlock,
-        targets);
+    // convert the last block to UC
+    bc.convertLastBlockToUC(oldBlock, targets);
+    // get the new created uc block
+    BlockInfo ucBlock = bc.getLastBlock();
     blocksMap.replaceBlock(ucBlock);
 
     // Remove block from replication queue.
@@ -771,11 +775,10 @@ public class BlockManager {
     return locations;
   }
 
-  private List<LocatedBlock> createLocatedBlockList(
-      final BlockInfoContiguous[] blocks,
+  private List<LocatedBlock> createLocatedBlockList(final BlockInfo[] blocks,
       final long offset, final long length, final int nrBlocksToReturn,
       final AccessMode mode) throws IOException {
-    int curBlk = 0;
+    int curBlk;
     long curPos = 0, blkSize = 0;
     int nrBlocks = (blocks[0].getNumBytes() == 0) ? 0 : blocks.length;
     for (curBlk = 0; curBlk < nrBlocks; curBlk++) {
@@ -788,10 +791,10 @@ public class BlockManager {
     }
 
     if (nrBlocks > 0 && curBlk == nrBlocks)   // offset >= end of file
-      return Collections.<LocatedBlock>emptyList();
+      return Collections.emptyList();
 
     long endOff = offset + length;
-    List<LocatedBlock> results = new ArrayList<LocatedBlock>(blocks.length);
+    List<LocatedBlock> results = new ArrayList<>(blocks.length);
     do {
       results.add(createLocatedBlock(blocks[curBlk], curPos, mode));
       curPos += blocks[curBlk].getNumBytes();
@@ -802,9 +805,9 @@ public class BlockManager {
     return results;
   }
 
-  private LocatedBlock createLocatedBlock(final BlockInfoContiguous[] blocks,
+  private LocatedBlock createLocatedBlock(final BlockInfo[] blocks,
       final long endPos, final AccessMode mode) throws IOException {
-    int curBlk = 0;
+    int curBlk;
     long curPos = 0;
     int nrBlocks = (blocks[0].getNumBytes() == 0) ? 0 : blocks.length;
     for (curBlk = 0; curBlk < nrBlocks; curBlk++) {
@@ -817,8 +820,8 @@ public class BlockManager {
     return createLocatedBlock(blocks[curBlk], curPos, mode);
   }
 
-  private LocatedBlock createLocatedBlock(final BlockInfoContiguous blk, final long pos,
+  private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos,
       final AccessMode mode) throws IOException {
     final LocatedBlock lb = createLocatedBlock(blk, pos);
     if (mode != null) {
@@ -828,8 +831,8 @@ public class BlockManager {
   }
 
   /** @return a LocatedBlock for the given block */
-  private LocatedBlock createLocatedBlock(final BlockInfoContiguous blk, final long pos
-      ) throws IOException {
+  private LocatedBlock createLocatedBlock(final BlockInfo blk,
      final long pos) throws IOException {
     if (blk instanceof BlockInfoContiguousUnderConstruction) {
       if (blk.isComplete()) {
         throw new IOException(
@@ -842,6 +845,7 @@ public class BlockManager {
       final ExtendedBlock eb = new ExtendedBlock(namesystem.getBlockPoolId(), blk);
       return newLocatedBlock(eb, storages, pos, false);
     }
+    // TODO support BlockInfoStripedUC
 
     // get block locations
     final int numCorruptNodes = countNodes(blk).corruptReplicas();
@@ -877,7 +881,7 @@ public class BlockManager {
   }
 
   /** Create a LocatedBlocks. */
-  public LocatedBlocks createLocatedBlocks(final BlockInfoContiguous[] blocks,
+  public LocatedBlocks createLocatedBlocks(final BlockInfo[] blocks,
       final long fileSizeExcludeBlocksUnderConstruction,
       final boolean isFileUnderConstruction, final long offset,
       final long length, final boolean needBlockToken,
@@ -900,7 +904,7 @@ public class BlockManager {
       final LocatedBlock lastlb;
       final boolean isComplete;
       if (!inSnapshot) {
-        final BlockInfoContiguous last = blocks[blocks.length - 1];
+        final BlockInfo last = blocks[blocks.length - 1];
         final long lastPos = last.isComplete()?
             fileSizeExcludeBlocksUnderConstruction - last.getNumBytes()
             : fileSizeExcludeBlocksUnderConstruction;
@@ -1724,12 +1728,15 @@ public class BlockManager {
    * reported by the datanode in the block report.
    */
   static class StatefulBlockInfo {
-    final BlockInfoContiguousUnderConstruction storedBlock;
+    final BlockInfo storedBlock; // should be UC block
     final Block reportedBlock;
     final ReplicaState reportedState;
 
-    StatefulBlockInfo(BlockInfoContiguousUnderConstruction storedBlock,
+    StatefulBlockInfo(BlockInfo storedBlock,
         Block reportedBlock, ReplicaState reportedState) {
+      Preconditions.checkArgument(
+          storedBlock instanceof BlockInfoContiguousUnderConstruction ||
+          storedBlock instanceof BlockInfoStripedUnderConstruction);
       this.storedBlock = storedBlock;
       this.reportedBlock = reportedBlock;
       this.reportedState = reportedState;
@@ -2133,15 +2140,12 @@ public class BlockManager {
       // If block is under construction, add this replica to its list
       if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) {
-        ((BlockInfoContiguousUnderConstruction) storedBlock)
-            .addReplicaIfNotPresent(storageInfo, iblk, reportedState);
+        BlockInfo.addReplica(storedBlock, storageInfo, iblk, reportedState);
         // OpenFileBlocks only inside snapshots also will be added to safemode
         // threshold. So we need to update such blocks to safemode
         // refer HDFS-5283
-        BlockInfoContiguousUnderConstruction blockUC =
-            (BlockInfoContiguousUnderConstruction) storedBlock;
-        if (namesystem.isInSnapshot(blockUC)) {
-          int numOfReplicas = blockUC.getNumExpectedLocations();
+        if (namesystem.isInSnapshot(storedBlock.getBlockCollection())) {
+          int numOfReplicas = BlockInfo.getNumExpectedLocations(storedBlock);
           namesystem.incrementSafeBlockCount(numOfReplicas);
         }
         //and fall through to next clause
@@ -2164,7 +2168,7 @@ public class BlockManager {
     // place a delimiter in the list which separates blocks
     // that have been reported from those that have not
     Block delimiterBlock = new Block();
-    BlockInfoContiguous delimiter = new BlockInfoContiguous(delimiterBlock,
+    BlockInfo delimiter = new BlockInfoContiguous(delimiterBlock,
        (short) 1);
     AddBlockResult result = storageInfo.addBlock(delimiter, delimiterBlock);
     assert result == AddBlockResult.ADDED
@@ -2294,9 +2298,8 @@ public class BlockManager {
     }
 
     if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) {
-      toUC.add(new StatefulBlockInfo(
-          (BlockInfoContiguousUnderConstruction) storedBlock,
-          new Block(block), reportedState));
+      toUC.add(new StatefulBlockInfo(storedBlock, new Block(block),
+          reportedState));
       return storedBlock;
     }
@@ -2487,9 +2490,8 @@ public class BlockManager {
   void addStoredBlockUnderConstruction(StatefulBlockInfo ucBlock,
       DatanodeStorageInfo storageInfo) throws IOException {
-    BlockInfoContiguousUnderConstruction block = ucBlock.storedBlock;
-    block.addReplicaIfNotPresent(
-        storageInfo, ucBlock.reportedBlock, ucBlock.reportedState);
+    BlockInfo block = ucBlock.storedBlock;
+    BlockInfo.addReplica(block, storageInfo, ucBlock.reportedBlock, ucBlock.reportedState);
 
     if (ucBlock.reportedState == ReplicaState.FINALIZED &&
         (block.findStorageInfo(storageInfo) < 0)) {
@@ -2549,7 +2551,8 @@ public class BlockManager {
     assert block != null && namesystem.hasWriteLock();
     BlockInfo storedBlock;
     DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
-    if (block instanceof BlockInfoContiguousUnderConstruction) {
+    if (block instanceof BlockInfoContiguousUnderConstruction ||
+        block instanceof BlockInfoStripedUnderConstruction) {
       //refresh our copy in case the block got completed in another thread
       storedBlock = getStoredBlock(block);
     } else {
@@ -2565,7 +2568,6 @@ public class BlockManager {
       return block;
     }
     BlockCollection bc = storedBlock.getBlockCollection();
-    assert bc != null : "Block must belong to a file";
 
     // add block to the datanode
     AddBlockResult result = storageInfo.addBlock(storedBlock, reportedBlock);
@@ -3493,8 +3495,8 @@ public class BlockManager {
    * replicated.
    */
   public boolean checkBlocksProperlyReplicated(
-      String src, BlockInfoContiguous[] blocks) {
-    for (BlockInfoContiguous b: blocks) {
+      String src, BlockInfo[] blocks) {
+    for (BlockInfo b: blocks) {
       if (!b.isComplete()) {
         final BlockInfoContiguousUnderConstruction uc =
             (BlockInfoContiguousUnderConstruction)b;
@@ -3563,7 +3565,7 @@ public class BlockManager {
     if (!this.shouldCheckForEnoughRacks) {
       return true;
     }
-    boolean enoughRacks = false;;
+    boolean enoughRacks = false;
     Collection<DatanodeDescriptor> corruptNodes =
         corruptReplicas.getNodes(b);
     int numExpectedReplicas = getReplication(b);
@@ -3609,21 +3611,15 @@ public class BlockManager {
     return this.neededReplications.getCorruptReplOneBlockSize();
   }
 
-  public BlockInfoContiguous addBlockCollection(BlockInfoContiguous block,
+  public BlockInfo addBlockCollection(BlockInfo block,
       BlockCollection bc) {
-    // TODO
-    return (BlockInfoContiguous) blocksMap.addBlockCollection(block, bc);
+    return blocksMap.addBlockCollection(block, bc);
   }
 
   public BlockCollection getBlockCollection(Block b) {
     return blocksMap.getBlockCollection(b);
   }
 
-  /** @return an iterator of the datanodes. */
-  public Iterable<DatanodeStorageInfo> getStorages(final Block block) {
-    return blocksMap.getStorages(block);
-  }
-
   public int numCorruptReplicas(Block block) {
     return corruptReplicas.numCorruptReplicas(block);
   }
@@ -3656,26 +3652,6 @@ public class BlockManager {
   public int getCapacity() {
     return blocksMap.getCapacity();
   }
-
-  /**
-   * Return a range of corrupt replica block ids. Up to numExpectedBlocks
-   * blocks starting at the next block after startingBlockId are returned
-   * (fewer if numExpectedBlocks blocks are unavailable). If startingBlockId
-   * is null, up to numExpectedBlocks blocks are returned from the beginning.
-   * If startingBlockId cannot be found, null is returned.
-   *
-   * @param numExpectedBlocks Number of block ids to return.
-   *  0 <= numExpectedBlocks <= 100
-   * @param startingBlockId Block id from which to start. If null, start at
-   *  beginning.
-   * @return Up to numExpectedBlocks blocks from startingBlockId if it exists
-   *
-   */
-  public long[] getCorruptReplicaBlockIds(int numExpectedBlocks,
-      Long startingBlockId) {
-    return corruptReplicas.getCorruptReplicaBlockIds(numExpectedBlocks,
-        startingBlockId);
-  }
 
   /**
    * Return an iterator over the set of blocks for which there are no replicas.
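The net effect of the BlockManager changes is that the commit/complete path no longer downcasts to the contiguous UC class. A condensed, non-authoritative restatement of the new commitOrCompleteLastBlock flow, with the replication check elided:

  // Simplified sketch; the real method also counts live replicas and calls
  // completeBlock(bc, bc.numBlocks() - 1, false) when minReplication is met.
  static boolean commitLastBlockSketch(BlockCollection bc, Block commitBlock)
      throws IOException {
    if (commitBlock == null) {
      return false;                                // allocation retry, nothing to commit
    }
    BlockInfo lastBlock = bc.getLastBlock();       // contiguous or striped
    if (lastBlock == null || lastBlock.isComplete()) {
      return false;
    }
    BlockInfo.commitBlock(lastBlock, commitBlock); // dispatches on the UC subtype
    return true;
  }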

View File

@@ -369,7 +369,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
    * @param file The file.
    */
   private void rescanFile(CacheDirective directive, INodeFile file) {
-    BlockInfoContiguous[] blockInfos = file.getBlocks();
+    BlockInfo[] blockInfos = file.getBlocks();
 
     // Increment the "needed" statistics
     directive.addFilesNeeded(1);
@@ -394,7 +394,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
     }
 
     long cachedTotal = 0;
-    for (BlockInfoContiguous blockInfo : blockInfos) {
+    for (BlockInfo blockInfo : blockInfos) {
       if (!blockInfo.getBlockUCState().equals(BlockUCState.COMPLETE)) {
         // We don't try to cache blocks that are under construction.
         LOG.trace("Directive {}: can't cache block {} because it is in state "
@@ -452,8 +452,8 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
           file.getFullPathName(), cachedTotal, neededTotal);
   }
 
   private String findReasonForNotCaching(CachedBlock cblock,
-      BlockInfoContiguous blockInfo) {
+      BlockInfo blockInfo) {
     if (blockInfo == null) {
       // Somehow, a cache report with the block arrived, but the block
       // reports from the DataNode haven't (yet?) described such a block.
@@ -513,7 +513,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
           iter.remove();
         }
       }
-      BlockInfoContiguous blockInfo = namesystem.getStoredBlock(new Block(cblock.getBlockId()));
+      BlockInfo blockInfo = namesystem.getStoredBlock(new Block(cblock.getBlockId()));
       String reason = findReasonForNotCaching(cblock, blockInfo);
       int neededCached = 0;
       if (reason != null) {
@@ -627,7 +627,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
       List<DatanodeDescriptor> pendingCached) {
     // To figure out which replicas can be cached, we consult the
     // blocksMap. We don't want to try to cache a corrupt replica, though.
-    BlockInfoContiguous blockInfo = namesystem.getStoredBlock(new Block(cachedBlock.getBlockId()));
+    BlockInfo blockInfo = namesystem.getStoredBlock(new Block(cachedBlock.getBlockId()));
     if (blockInfo == null) {
       LOG.debug("Block {}: can't add new cached replicas," +
           " because there is no record of this block " +
@@ -665,7 +665,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
     Iterator<CachedBlock> it = datanode.getPendingCached().iterator();
     while (it.hasNext()) {
       CachedBlock cBlock = it.next();
-      BlockInfoContiguous info =
+      BlockInfo info =
           namesystem.getStoredBlock(new Block(cBlock.getBlockId()));
       if (info != null) {
         pendingBytes -= info.getNumBytes();
@@ -675,7 +675,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
     // Add pending uncached blocks from effective capacity
     while (it.hasNext()) {
       CachedBlock cBlock = it.next();
-      BlockInfoContiguous info =
+      BlockInfo info =
          namesystem.getStoredBlock(new Block(cBlock.getBlockId()));
       if (info != null) {
         pendingBytes += info.getNumBytes();

View File

@@ -143,6 +143,7 @@ class FSDirConcatOp {
         throw new HadoopIllegalArgumentException("concat: source file " + src
             + " is invalid or empty or underConstruction");
       }
+
       // source file's preferred block size cannot be greater than the target
       // file
       if (srcINodeFile.getPreferredBlockSize() >
@@ -152,6 +153,11 @@ class FSDirConcatOp {
             + " which is greater than the target file's preferred block size "
             + targetINode.getPreferredBlockSize());
       }
+      // TODO currently we do not support concatenating EC files
+      if (srcINodeFile.isStriped()) {
+        throw new HadoopIllegalArgumentException("concat: the src file " + src
+            + " is with striped blocks");
+      }
       si.add(srcINodeFile);
     }
@@ -228,7 +234,7 @@ class FSDirConcatOp {
     int count = 0;
     for (INodeFile nodeToRemove : srcList) {
       if(nodeToRemove != null) {
-        nodeToRemove.setBlocks(null);
+        nodeToRemove.setContiguousBlocks(null);
         nodeToRemove.getParent().removeChild(nodeToRemove);
         fsd.getINodeMap().remove(nodeToRemove);
         count++;
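For context, a hedged sketch of what the new guard means for a client calling concat; the paths are made up, and the server-side HadoopIllegalArgumentException reaches the client through the RPC layer:

  // Illustration only: concat with a striped (erasure-coded) source is rejected.
  try {
    fs.concat(new Path("/user/foo/plain-target"),
        new Path[] { new Path("/user/foo/ec-source") });
  } catch (IOException e) {
    // expected while concatenating striped files is unsupported (see TODO above)
  }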

View File

@@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
@@ -919,7 +920,7 @@ public class FSDirectory implements Closeable {
     unprotectedTruncate(iip, newLength, collectedBlocks, mtime, null);
 
     if(! onBlockBoundary) {
-      BlockInfoContiguous oldBlock = file.getLastBlock();
+      BlockInfo oldBlock = file.getLastBlock();
       Block tBlk =
       getFSNamesystem().prepareFileForTruncate(iip,
           clientName, clientMachine, file.computeFileSize() - newLength,
@@ -928,7 +929,7 @@ public class FSDirectory implements Closeable {
           tBlk.getNumBytes() == truncateBlock.getNumBytes() :
           "Should be the same block.";
       if(oldBlock.getBlockId() != tBlk.getBlockId() &&
-         !file.isBlockInLatestSnapshot(oldBlock)) {
+         !file.isBlockInLatestSnapshot((BlockInfoContiguous) oldBlock)) {
         getBlockManager().removeBlockFromMap(oldBlock);
       }
     }

View File

@@ -44,8 +44,8 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@@ -773,10 +773,10 @@ public class FSEditLog implements LogsPurgeable {
   public void logAddBlock(String path, INodeFile file) {
     Preconditions.checkArgument(file.isUnderConstruction());
-    BlockInfoContiguous[] blocks = file.getBlocks();
+    BlockInfo[] blocks = file.getBlocks();
     Preconditions.checkState(blocks != null && blocks.length > 0);
-    BlockInfoContiguous pBlock = blocks.length > 1 ? blocks[blocks.length - 2] : null;
-    BlockInfoContiguous lastBlock = blocks[blocks.length - 1];
+    BlockInfo pBlock = blocks.length > 1 ? blocks[blocks.length - 2] : null;
+    BlockInfo lastBlock = blocks[blocks.length - 1];
     AddBlockOp op = AddBlockOp.getInstance(cache.get()).setPath(path)
         .setPenultimateBlock(pBlock).setLastBlock(lastBlock);
     logEdit(op);

View File

@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -506,7 +507,7 @@ public class FSEditLogLoader {
       }
       INodeFile oldFile = INodeFile.valueOf(fsDir.getINode(path), path);
       // add the new block to the INodeFile
-      addNewBlock(fsDir, addBlockOp, oldFile);
+      addNewBlock(addBlockOp, oldFile);
       break;
     }
     case OP_SET_REPLICATION: {
@@ -939,15 +940,15 @@ public class FSEditLogLoader {
   /**
    * Add a new block into the given INodeFile
+   * TODO support adding striped block
    */
-  private void addNewBlock(FSDirectory fsDir, AddBlockOp op, INodeFile file)
-      throws IOException {
-    BlockInfoContiguous[] oldBlocks = file.getBlocks();
+  private void addNewBlock(AddBlockOp op, INodeFile file) throws IOException {
+    BlockInfo[] oldBlocks = file.getBlocks();
     Block pBlock = op.getPenultimateBlock();
     Block newBlock= op.getLastBlock();
 
     if (pBlock != null) { // the penultimate block is not null
-      Preconditions.checkState(oldBlocks != null && oldBlocks.length > 0);
+      assert oldBlocks != null && oldBlocks.length > 0;
       // compare pBlock with the last block of oldBlocks
       Block oldLastBlock = oldBlocks[oldBlocks.length - 1];
       if (oldLastBlock.getBlockId() != pBlock.getBlockId()
@@ -977,12 +978,13 @@ public class FSEditLogLoader {
   /**
    * Update in-memory data structures with new block information.
+   * TODO support adding striped block
    * @throws IOException
    */
   private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op,
       INodesInPath iip, INodeFile file) throws IOException {
     // Update its block list
-    BlockInfoContiguous[] oldBlocks = file.getBlocks();
+    BlockInfo[] oldBlocks = file.getBlocks();
     Block[] newBlocks = op.getBlocks();
     String path = op.getPath();
@@ -991,7 +993,7 @@ public class FSEditLogLoader {
 
     // First, update blocks in common
     for (int i = 0; i < oldBlocks.length && i < newBlocks.length; i++) {
-      BlockInfoContiguous oldBlock = oldBlocks[i];
+      BlockInfo oldBlock = oldBlocks[i];
       Block newBlock = newBlocks[i];
       boolean isLastBlock = i == newBlocks.length - 1;

View File

@ -52,6 +52,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutFlags; import org.apache.hadoop.hdfs.protocol.LayoutFlags;
import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@ -686,7 +687,7 @@ public class FSImageFormat {
public void updateBlocksMap(INodeFile file) { public void updateBlocksMap(INodeFile file) {
// Add file->block mapping // Add file->block mapping
final BlockInfoContiguous[] blocks = file.getBlocks(); final BlockInfo[] blocks = file.getBlocks();
if (blocks != null) { if (blocks != null) {
final BlockManager bm = namesystem.getBlockManager(); final BlockManager bm = namesystem.getBlockManager();
for (int i = 0; i < blocks.length; i++) { for (int i = 0; i < blocks.length; i++) {
@ -958,9 +959,9 @@ public class FSImageFormat {
FileUnderConstructionFeature uc = cons.getFileUnderConstructionFeature(); FileUnderConstructionFeature uc = cons.getFileUnderConstructionFeature();
oldnode.toUnderConstruction(uc.getClientName(), uc.getClientMachine()); oldnode.toUnderConstruction(uc.getClientName(), uc.getClientMachine());
if (oldnode.numBlocks() > 0) { if (oldnode.numBlocks() > 0) {
BlockInfoContiguous ucBlock = cons.getLastBlock(); BlockInfo ucBlock = cons.getLastBlock();
// we do not replace the inode, just replace the last block of oldnode // we do not replace the inode, just replace the last block of oldnode
BlockInfoContiguous info = namesystem.getBlockManager().addBlockCollection( BlockInfo info = namesystem.getBlockManager().addBlockCollection(
ucBlock, oldnode); ucBlock, oldnode);
oldnode.setBlock(oldnode.numBlocks() - 1, info); oldnode.setBlock(oldnode.numBlocks() - 1, info);
} }
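When the legacy fsimage loader restores an under-construction file, it swaps only the last block and stores the instance handed back by addBlockCollection, so the blocks map and the inode end up sharing a single object. A small sketch of why that identity matters, with a hypothetical map-based registry standing in for the BlockManager:

import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-ins for illustration: a block keyed by id and a registry
// that keeps one canonical instance per id, as the blocks map does.
class DemoBlock {
  final long id;
  long numBytes;
  DemoBlock(long id) { this.id = id; }
}

class DemoRegistry {
  private final Map<Long, DemoBlock> map = new HashMap<Long, DemoBlock>();

  // Register a block and return the canonical instance for that id.
  DemoBlock addBlock(DemoBlock b) {
    DemoBlock existing = map.get(b.id);
    if (existing != null) {
      return existing;
    }
    map.put(b.id, b);
    return b;
  }
}

class LoaderDemo {
  // Store the returned instance, not the one read from the image, so that
  // later state changes are visible through the registry as well.
  static void register(DemoBlock[] loaded, DemoRegistry registry) {
    for (int i = 0; i < loaded.length; i++) {
      loaded[i] = registry.addBlock(loaded[i]);
    }
  }
}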


@ -42,9 +42,13 @@ import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StripedBlockProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStripedUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.LoaderContext; import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.LoaderContext;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext; import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext;
@ -53,6 +57,7 @@ import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructio
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.StripedBlocksFeature;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto;
@ -211,7 +216,7 @@ public final class FSImageFormatPBINode {
public static void updateBlocksMap(INodeFile file, BlockManager bm) { public static void updateBlocksMap(INodeFile file, BlockManager bm) {
// Add file->block mapping // Add file->block mapping
final BlockInfoContiguous[] blocks = file.getBlocks(); final BlockInfo[] blocks = file.getBlocks();
if (blocks != null) { if (blocks != null) {
for (int i = 0; i < blocks.length; i++) { for (int i = 0; i < blocks.length; i++) {
file.setBlock(i, bm.addBlockCollection(blocks[i], file)); file.setBlock(i, bm.addBlockCollection(blocks[i], file));
@ -347,16 +352,30 @@ public final class FSImageFormatPBINode {
loadXAttrs(f.getXAttrs(), state.getStringTable()))); loadXAttrs(f.getXAttrs(), state.getStringTable())));
} }
FileWithStripedBlocksFeature stripeFeature = null;
if (f.hasStripedBlocks()) {
StripedBlocksFeature sb = f.getStripedBlocks();
stripeFeature = file.addStripedBlocksFeature();
for (StripedBlockProto sp : sb.getBlocksList()) {
stripeFeature.addBlock(PBHelper.convert(sp));
}
}
// under-construction information // under-construction information
if (f.hasFileUC()) { if (f.hasFileUC()) {
INodeSection.FileUnderConstructionFeature uc = f.getFileUC(); INodeSection.FileUnderConstructionFeature uc = f.getFileUC();
file.toUnderConstruction(uc.getClientName(), uc.getClientMachine()); file.toUnderConstruction(uc.getClientName(), uc.getClientMachine());
if (blocks.length > 0) { BlockInfo lastBlk = file.getLastBlock();
BlockInfoContiguous lastBlk = file.getLastBlock(); // replace the last block of file
// replace the last block of file final BlockInfo ucBlk;
file.setBlock(file.numBlocks() - 1, new BlockInfoContiguousUnderConstruction( if (stripeFeature != null) {
lastBlk, replication)); BlockInfoStriped striped = (BlockInfoStriped) lastBlk;
ucBlk = new BlockInfoStripedUnderConstruction(striped,
striped.getDataBlockNum(), striped.getParityBlockNum());
} else {
ucBlk = new BlockInfoContiguousUnderConstruction(lastBlk, replication);
} }
file.setBlock(file.numBlocks() - 1, ucBlk);
} }
return file; return file;
} }
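For an under-construction file the protobuf loader above replaces only the last block, choosing the under-construction subclass from the file's layout: a striped file gets a striped UC block carrying the data/parity counts, anything else keeps the contiguous UC block built from the replication factor. A compact sketch of that decision, again with simplified stand-in types instead of the real block classes:

// Stand-in types for illustration; not the HDFS block classes.
class Blk { long id; }
class StripedBlk extends Blk { short dataNum, parityNum; }
abstract class UcBlk extends Blk { }
class StripedUcBlk extends UcBlk { short dataNum, parityNum; }
class ContiguousUcBlk extends UcBlk { short replication; }

class LastBlockDemo {
  // Pick the under-construction variant that matches the file layout.
  static UcBlk toUnderConstruction(Blk last, boolean striped, short replication) {
    if (striped) {
      StripedBlk s = (StripedBlk) last;
      StripedUcBlk uc = new StripedUcBlk();
      uc.dataNum = s.dataNum;
      uc.parityNum = s.parityNum;
      return uc;
    }
    ContiguousUcBlk uc = new ContiguousUcBlk();
    uc.replication = replication;
    return uc;
  }
}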
@ -630,6 +649,19 @@ public final class FSImageFormatPBINode {
} }
} }
FileWithStripedBlocksFeature sb = n.getStripedBlocksFeature();
if (sb != null) {
StripedBlocksFeature.Builder builder =
StripedBlocksFeature.newBuilder();
BlockInfoStriped[] sblocks = sb.getBlocks();
if (sblocks != null) {
for (BlockInfoStriped sblk : sblocks) {
builder.addBlocks(PBHelper.convert(sblk));
}
}
b.setStripedBlocks(builder.build());
}
FileUnderConstructionFeature uc = n.getFileUnderConstructionFeature(); FileUnderConstructionFeature uc = n.getFileUnderConstructionFeature();
if (uc != null) { if (uc != null) {
INodeSection.FileUnderConstructionFeature f = INodeSection.FileUnderConstructionFeature f =
@ -658,7 +690,7 @@ public final class FSImageFormatPBINode {
r.writeDelimitedTo(out); r.writeDelimitedTo(out);
} }
private final INodeSection.INode.Builder buildINodeCommon(INode n) { private INodeSection.INode.Builder buildINodeCommon(INode n) {
return INodeSection.INode.newBuilder() return INodeSection.INode.newBuilder()
.setId(n.getId()) .setId(n.getId())
.setName(ByteString.copyFrom(n.getLocalNameBytes())); .setName(ByteString.copyFrom(n.getLocalNameBytes()));


@ -203,8 +203,10 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretMan
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager.SecretManagerState; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager.SecretManagerState;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection; import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStripedUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
@ -2009,6 +2011,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
final BlockStoragePolicy lpPolicy = final BlockStoragePolicy lpPolicy =
blockManager.getStoragePolicy("LAZY_PERSIST"); blockManager.getStoragePolicy("LAZY_PERSIST");
// truncate is not supported for files with striped blocks
if (file.isStriped()) {
throw new UnsupportedOperationException(
"Cannot truncate file with striped block " + src);
}
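With this guard, truncate fails fast for a striped file before any lease or block manipulation starts, so the caller simply sees an UnsupportedOperationException. A hedged usage sketch; the path below is hypothetical and the example assumes the FileSystem#truncate API that is available on trunk alongside this change:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TruncateStripedDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    try {
      // "/ec/striped.dat" is an assumed path to a file stored with the
      // striped (erasure-coded) layout.
      fs.truncate(new Path("/ec/striped.dat"), 1024L);
    } catch (UnsupportedOperationException e) {
      // Expected with this patch: striped files cannot be truncated yet.
      System.err.println("truncate rejected: " + e.getMessage());
    }
  }
}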
if (lpPolicy != null && if (lpPolicy != null &&
lpPolicy.getId() == file.getStoragePolicyID()) { lpPolicy.getId() == file.getStoragePolicyID()) {
throw new UnsupportedOperationException( throw new UnsupportedOperationException(
@ -2090,8 +2097,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
leaseManager.addLease( leaseManager.addLease(
file.getFileUnderConstructionFeature().getClientName(), file.getId()); file.getFileUnderConstructionFeature().getClientName(), file.getId());
boolean shouldRecoverNow = (newBlock == null); boolean shouldRecoverNow = (newBlock == null);
BlockInfoContiguous oldBlock = file.getLastBlock();
boolean shouldCopyOnTruncate = shouldCopyOnTruncate(file, oldBlock); BlockInfo oldBlock = file.getLastBlock();
assert oldBlock instanceof BlockInfoContiguous;
boolean shouldCopyOnTruncate = shouldCopyOnTruncate(file,
(BlockInfoContiguous) oldBlock);
if(newBlock == null) { if(newBlock == null) {
newBlock = (shouldCopyOnTruncate) ? createNewBlock(file.isStriped()) : newBlock = (shouldCopyOnTruncate) ? createNewBlock(file.isStriped()) :
new Block(oldBlock.getBlockId(), oldBlock.getNumBytes(), new Block(oldBlock.getBlockId(), oldBlock.getNumBytes(),
@ -2106,7 +2117,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
file.getPreferredBlockReplication()); file.getPreferredBlockReplication());
truncatedBlockUC.setNumBytes(oldBlock.getNumBytes() - lastBlockDelta); truncatedBlockUC.setNumBytes(oldBlock.getNumBytes() - lastBlockDelta);
truncatedBlockUC.setTruncateBlock(oldBlock); truncatedBlockUC.setTruncateBlock(oldBlock);
file.setLastBlock(truncatedBlockUC, blockManager.getStorages(oldBlock)); file.convertLastBlockToUC(truncatedBlockUC,
blockManager.getStorages(oldBlock));
getBlockManager().addBlockCollection(truncatedBlockUC, file); getBlockManager().addBlockCollection(truncatedBlockUC, file);
NameNode.stateChangeLog.info("BLOCK* prepareFileForTruncate: " NameNode.stateChangeLog.info("BLOCK* prepareFileForTruncate: "
@ -2494,6 +2506,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
+ src + " for client " + clientMachine); + src + " for client " + clientMachine);
} }
INodeFile myFile = INodeFile.valueOf(inode, src, true); INodeFile myFile = INodeFile.valueOf(inode, src, true);
// append is not supported for files with striped blocks
if (myFile.isStriped()) {
throw new UnsupportedOperationException(
"Cannot append to file with striped block " + src);
}
final BlockStoragePolicy lpPolicy = final BlockStoragePolicy lpPolicy =
blockManager.getStoragePolicy("LAZY_PERSIST"); blockManager.getStoragePolicy("LAZY_PERSIST");
if (lpPolicy != null && if (lpPolicy != null &&
@ -2505,7 +2524,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
recoverLeaseInternal(RecoverLeaseOp.APPEND_FILE, iip, src, holder, recoverLeaseInternal(RecoverLeaseOp.APPEND_FILE, iip, src, holder,
clientMachine, false); clientMachine, false);
final BlockInfoContiguous lastBlock = myFile.getLastBlock(); final BlockInfoContiguous lastBlock =
(BlockInfoContiguous) myFile.getLastBlock();
// Check that the block has at least minimum replication. // Check that the block has at least minimum replication.
if(lastBlock != null && lastBlock.isComplete() && if(lastBlock != null && lastBlock.isComplete() &&
!getBlockManager().isSufficientlyReplicated(lastBlock)) { !getBlockManager().isSufficientlyReplicated(lastBlock)) {
@ -2561,7 +2581,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
} }
} }
} else { } else {
BlockInfoContiguous lastBlock = file.getLastBlock(); BlockInfo lastBlock = file.getLastBlock();
if (lastBlock != null) { if (lastBlock != null) {
ExtendedBlock blk = new ExtendedBlock(this.getBlockPoolId(), lastBlock); ExtendedBlock blk = new ExtendedBlock(this.getBlockPoolId(), lastBlock);
ret = new LocatedBlock(blk, new DatanodeInfo[0]); ret = new LocatedBlock(blk, new DatanodeInfo[0]);
@ -2740,7 +2760,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
op.getExceptionMessage(src, holder, clientMachine, op.getExceptionMessage(src, holder, clientMachine,
"lease recovery is in progress. Try again later.")); "lease recovery is in progress. Try again later."));
} else { } else {
final BlockInfoContiguous lastBlock = file.getLastBlock(); final BlockInfo lastBlock = file.getLastBlock();
if (lastBlock != null if (lastBlock != null
&& lastBlock.getBlockUCState() == BlockUCState.UNDER_RECOVERY) { && lastBlock.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
throw new RecoveryInProgressException( throw new RecoveryInProgressException(
@ -3066,13 +3086,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
.getBlocks()); .getBlocks());
} else { } else {
// check the penultimate block of this file // check the penultimate block of this file
BlockInfoContiguous b = v.getPenultimateBlock(); BlockInfo b = v.getPenultimateBlock();
return b == null || return b == null ||
blockManager.checkBlocksProperlyReplicated( blockManager.checkBlocksProperlyReplicated(
src, new BlockInfoContiguous[] { b }); src, new BlockInfo[] { b });
} }
} }
/** /**
* Change the indicated filename. * Change the indicated filename.
* @deprecated Use {@link #renameTo(String, String, boolean, * @deprecated Use {@link #renameTo(String, String, boolean,
@ -3243,7 +3263,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
for (Block b : blocks.getToDeleteList()) { for (Block b : blocks.getToDeleteList()) {
if (trackBlockCounts) { if (trackBlockCounts) {
BlockInfoContiguous bi = getStoredBlock(b); BlockInfo bi = getStoredBlock(b);
if (bi.isComplete()) { if (bi.isComplete()) {
numRemovedComplete++; numRemovedComplete++;
if (bi.numNodes() >= blockManager.minReplication) { if (bi.numNodes() >= blockManager.minReplication) {
@ -3467,10 +3487,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
final INodeFile pendingFile = iip.getLastINode().asFile(); final INodeFile pendingFile = iip.getLastINode().asFile();
int nrBlocks = pendingFile.numBlocks(); int nrBlocks = pendingFile.numBlocks();
BlockInfoContiguous[] blocks = pendingFile.getBlocks(); BlockInfo[] blocks = pendingFile.getBlocks();
int nrCompleteBlocks; int nrCompleteBlocks;
BlockInfoContiguous curBlock = null; BlockInfo curBlock = null;
for(nrCompleteBlocks = 0; nrCompleteBlocks < nrBlocks; nrCompleteBlocks++) { for(nrCompleteBlocks = 0; nrCompleteBlocks < nrBlocks; nrCompleteBlocks++) {
curBlock = blocks[nrCompleteBlocks]; curBlock = blocks[nrCompleteBlocks];
if(!curBlock.isComplete()) if(!curBlock.isComplete())
@ -3505,12 +3525,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
// The last block is not COMPLETE, and // The last block is not COMPLETE, and
// that the penultimate block if exists is either COMPLETE or COMMITTED // that the penultimate block if exists is either COMPLETE or COMMITTED
final BlockInfoContiguous lastBlock = pendingFile.getLastBlock(); final BlockInfo lastBlock = pendingFile.getLastBlock();
BlockUCState lastBlockState = lastBlock.getBlockUCState(); BlockUCState lastBlockState = lastBlock.getBlockUCState();
BlockInfoContiguous penultimateBlock = pendingFile.getPenultimateBlock(); BlockInfo penultimateBlock = pendingFile.getPenultimateBlock();
// If penultimate block doesn't exist then its minReplication is met // If penultimate block doesn't exist then its minReplication is met
boolean penultimateBlockMinReplication = penultimateBlock == null ? true : boolean penultimateBlockMinReplication = penultimateBlock == null ||
blockManager.checkMinReplication(penultimateBlock); blockManager.checkMinReplication(penultimateBlock);
switch(lastBlockState) { switch(lastBlockState) {
@ -3540,6 +3560,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
throw new AlreadyBeingCreatedException(message); throw new AlreadyBeingCreatedException(message);
case UNDER_CONSTRUCTION: case UNDER_CONSTRUCTION:
case UNDER_RECOVERY: case UNDER_RECOVERY:
// TODO: support recovery of striped blocks
final BlockInfoContiguousUnderConstruction uc = final BlockInfoContiguousUnderConstruction uc =
(BlockInfoContiguousUnderConstruction)lastBlock; (BlockInfoContiguousUnderConstruction)lastBlock;
// determine if last block was intended to be truncated // determine if last block was intended to be truncated
@ -3651,14 +3672,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
blockManager.checkReplication(pendingFile); blockManager.checkReplication(pendingFile);
} }
public BlockInfoContiguous getStoredBlock(Block block) { public BlockInfo getStoredBlock(Block block) {
return (BlockInfoContiguous) blockManager.getStoredBlock(block); return blockManager.getStoredBlock(block);
} }
@Override @Override
public boolean isInSnapshot(BlockInfoContiguousUnderConstruction blockUC) { public boolean isInSnapshot(BlockCollection bc) {
assert hasReadLock(); assert hasReadLock();
final BlockCollection bc = blockUC.getBlockCollection();
if (bc == null || !(bc instanceof INodeFile) if (bc == null || !(bc instanceof INodeFile)
|| !bc.isUnderConstruction()) { || !bc.isUnderConstruction()) {
return false; return false;
@ -3703,7 +3723,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
waitForLoadingFSImage(); waitForLoadingFSImage();
writeLock(); writeLock();
boolean copyTruncate = false; boolean copyTruncate = false;
BlockInfoContiguousUnderConstruction truncatedBlock = null; BlockInfo truncatedBlock = null;
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
// If a DN tries to commit to the standby, the recovery will // If a DN tries to commit to the standby, the recovery will
@ -3711,7 +3731,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
checkNameNodeSafeMode( checkNameNodeSafeMode(
"Cannot commitBlockSynchronization while in safe mode"); "Cannot commitBlockSynchronization while in safe mode");
final BlockInfoContiguous storedBlock = getStoredBlock( final BlockInfo storedBlock = getStoredBlock(
ExtendedBlock.getLocalBlock(oldBlock)); ExtendedBlock.getLocalBlock(oldBlock));
if (storedBlock == null) { if (storedBlock == null) {
if (deleteblock) { if (deleteblock) {
@ -3760,9 +3780,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
return; return;
} }
truncatedBlock = (BlockInfoContiguousUnderConstruction) iFile truncatedBlock = iFile.getLastBlock();
.getLastBlock(); long recoveryId = BlockInfo.getBlockRecoveryId(truncatedBlock);
long recoveryId = truncatedBlock.getBlockRecoveryId();
copyTruncate = truncatedBlock.getBlockId() != storedBlock.getBlockId(); copyTruncate = truncatedBlock.getBlockId() != storedBlock.getBlockId();
if(recoveryId != newgenerationstamp) { if(recoveryId != newgenerationstamp) {
throw new IOException("The recovery id " + newgenerationstamp throw new IOException("The recovery id " + newgenerationstamp
@ -3776,8 +3795,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
if (remove) { if (remove) {
blockManager.removeBlock(storedBlock); blockManager.removeBlock(storedBlock);
} }
} } else {
else {
// update last block // update last block
if(!copyTruncate) { if(!copyTruncate) {
storedBlock.setGenerationStamp(newgenerationstamp); storedBlock.setGenerationStamp(newgenerationstamp);
@ -3825,9 +3843,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
trimmedTargets.toArray(new DatanodeID[trimmedTargets.size()]), trimmedTargets.toArray(new DatanodeID[trimmedTargets.size()]),
trimmedStorages.toArray(new String[trimmedStorages.size()])); trimmedStorages.toArray(new String[trimmedStorages.size()]));
if(copyTruncate) { if(copyTruncate) {
iFile.setLastBlock(truncatedBlock, trimmedStorageInfos); iFile.convertLastBlockToUC(truncatedBlock, trimmedStorageInfos);
} else { } else {
iFile.setLastBlock(storedBlock, trimmedStorageInfos); iFile.convertLastBlockToUC(storedBlock, trimmedStorageInfos);
if (closeFile) { if (closeFile) {
blockManager.markBlockReplicasAsCorrupt(storedBlock, blockManager.markBlockReplicasAsCorrupt(storedBlock,
oldGenerationStamp, oldNumBytes, trimmedStorageInfos); oldGenerationStamp, oldNumBytes, trimmedStorageInfos);
@ -3838,7 +3856,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
if (closeFile) { if (closeFile) {
if(copyTruncate) { if(copyTruncate) {
src = closeFileCommitBlocks(iFile, truncatedBlock); src = closeFileCommitBlocks(iFile, truncatedBlock);
if(!iFile.isBlockInLatestSnapshot(storedBlock)) { if(!iFile.isBlockInLatestSnapshot((BlockInfoContiguous) storedBlock)) {
blockManager.removeBlock(storedBlock); blockManager.removeBlock(storedBlock);
} }
} else { } else {
@ -3872,7 +3890,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
* @throws IOException on error * @throws IOException on error
*/ */
@VisibleForTesting @VisibleForTesting
String closeFileCommitBlocks(INodeFile pendingFile, BlockInfoContiguous storedBlock) String closeFileCommitBlocks(INodeFile pendingFile, BlockInfo storedBlock)
throws IOException { throws IOException {
final INodesInPath iip = INodesInPath.fromINode(pendingFile); final INodesInPath iip = INodesInPath.fromINode(pendingFile);
final String src = iip.getPath(); final String src = iip.getPath();
@ -4163,7 +4181,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
while (it.hasNext()) { while (it.hasNext()) {
Block b = it.next(); Block b = it.next();
BlockInfoContiguous blockInfo = getStoredBlock(b); BlockInfo blockInfo = getStoredBlock(b);
if (blockInfo.getBlockCollection().getStoragePolicyID() == lpPolicy.getId()) { if (blockInfo.getBlockCollection().getStoragePolicyID() == lpPolicy.getId()) {
filesToDelete.add(blockInfo.getBlockCollection()); filesToDelete.add(blockInfo.getBlockCollection());
} }
@ -5105,7 +5123,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
SafeModeInfo safeMode = this.safeMode; SafeModeInfo safeMode = this.safeMode;
if (safeMode == null) // mostly true if (safeMode == null) // mostly true
return; return;
BlockInfoContiguous storedBlock = getStoredBlock(b); BlockInfo storedBlock = getStoredBlock(b);
if (storedBlock.isComplete()) { if (storedBlock.isComplete()) {
safeMode.decrementSafeBlockCount((short)blockManager.countNodes(b).liveReplicas()); safeMode.decrementSafeBlockCount((short)blockManager.countNodes(b).liveReplicas());
} }
@ -5667,7 +5685,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
+ "access token for block " + block); + "access token for block " + block);
// check stored block state // check stored block state
BlockInfoContiguous storedBlock = getStoredBlock(ExtendedBlock.getLocalBlock(block)); BlockInfo storedBlock = getStoredBlock(ExtendedBlock.getLocalBlock(block));
if (storedBlock == null || if (storedBlock == null ||
storedBlock.getBlockUCState() != BlockUCState.UNDER_CONSTRUCTION) { storedBlock.getBlockUCState() != BlockUCState.UNDER_CONSTRUCTION) {
throw new IOException(block + throw new IOException(block +
@ -5796,8 +5814,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
assert hasWriteLock(); assert hasWriteLock();
// check the validity of the block and lease holder name // check the validity of the block and lease holder name
final INodeFile pendingFile = checkUCBlock(oldBlock, clientName); final INodeFile pendingFile = checkUCBlock(oldBlock, clientName);
final BlockInfoContiguousUnderConstruction blockinfo final BlockInfo lastBlock = pendingFile.getLastBlock();
= (BlockInfoContiguousUnderConstruction)pendingFile.getLastBlock(); // when updating the pipeline, the last block must be a contiguous block
assert lastBlock instanceof BlockInfoContiguousUnderConstruction;
BlockInfoContiguousUnderConstruction blockinfo =
(BlockInfoContiguousUnderConstruction) lastBlock;
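Because getLastBlock() now returns the BlockInfo supertype, call sites that are only defined for one layout state the assumption with an assert before downcasting, which fails fast under tests and documents the invariant for readers. A generic sketch of the pattern with stand-in types, not the HDFS classes:

// Stand-in types used only to illustrate the assert-then-cast pattern.
class AnyBlock { }
class ContiguousBlock extends AnyBlock { int replication; }

class PipelineDemo {
  static int replicationOf(AnyBlock last) {
    // State and enforce the layout assumption before the cast.
    assert last instanceof ContiguousBlock
        : "pipeline updates are only defined for contiguous blocks";
    return ((ContiguousBlock) last).replication;
  }
}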
// check new GS & length: this is not expected // check new GS & length: this is not expected
if (newBlock.getGenerationStamp() <= blockinfo.getGenerationStamp() || if (newBlock.getGenerationStamp() <= blockinfo.getGenerationStamp() ||


@ -20,8 +20,10 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException; import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStripedUnderConstruction;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
/** /**
@ -58,12 +60,12 @@ public class FileUnderConstructionFeature implements INode.Feature {
*/ */
void updateLengthOfLastBlock(INodeFile f, long lastBlockLength) void updateLengthOfLastBlock(INodeFile f, long lastBlockLength)
throws IOException { throws IOException {
BlockInfoContiguous lastBlock = f.getLastBlock(); BlockInfo lastBlock = f.getLastBlock();
assert (lastBlock != null) : "The last block for path " assert (lastBlock != null) : "The last block for path "
+ f.getFullPathName() + " is null when updating its length"; + f.getFullPathName() + " is null when updating its length";
assert (lastBlock instanceof BlockInfoContiguousUnderConstruction) assert !lastBlock.isComplete()
: "The last block for path " + f.getFullPathName() : "The last block for path " + f.getFullPathName()
+ " is not a BlockInfoUnderConstruction when updating its length"; + " is not a BlockInfoUnderConstruction when updating its length";
lastBlock.setNumBytes(lastBlockLength); lastBlock.setNumBytes(lastBlockLength);
} }
@ -74,11 +76,10 @@ public class FileUnderConstructionFeature implements INode.Feature {
*/ */
void cleanZeroSizeBlock(final INodeFile f, void cleanZeroSizeBlock(final INodeFile f,
final BlocksMapUpdateInfo collectedBlocks) { final BlocksMapUpdateInfo collectedBlocks) {
final BlockInfoContiguous[] blocks = f.getBlocks(); final BlockInfo[] blocks = f.getBlocks();
if (blocks != null && blocks.length > 0 if (blocks != null && blocks.length > 0
&& blocks[blocks.length - 1] instanceof BlockInfoContiguousUnderConstruction) { && !blocks[blocks.length - 1].isComplete()) {
BlockInfoContiguousUnderConstruction lastUC = BlockInfo lastUC = blocks[blocks.length - 1];
(BlockInfoContiguousUnderConstruction) blocks[blocks.length - 1];
if (lastUC.getNumBytes() == 0) { if (lastUC.getNumBytes() == 0) {
// this is a 0-sized block. do not need check its UC state here // this is a 0-sized block. do not need check its UC state here
collectedBlocks.addDeleteBlock(lastUC); collectedBlocks.addDeleteBlock(lastUC);


@ -0,0 +1,112 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
/**
* Feature for file with striped blocks
*/
class FileWithStripedBlocksFeature implements INode.Feature {
private BlockInfoStriped[] blocks;
FileWithStripedBlocksFeature() {
blocks = new BlockInfoStriped[0];
}
FileWithStripedBlocksFeature(BlockInfoStriped[] blocks) {
Preconditions.checkArgument(blocks != null);
this.blocks = blocks;
}
BlockInfoStriped[] getBlocks() {
return this.blocks;
}
void setBlock(int index, BlockInfoStriped blk) {
blocks[index] = blk;
}
BlockInfoStriped getLastBlock() {
return blocks == null || blocks.length == 0 ?
null : blocks[blocks.length - 1];
}
int numBlocks() {
return blocks == null ? 0 : blocks.length;
}
void updateBlockCollection(INodeFile file) {
if (blocks != null) {
for (BlockInfoStriped blk : blocks) {
blk.setBlockCollection(file);
}
}
}
private void setBlocks(BlockInfoStriped[] blocks) {
this.blocks = blocks;
}
void addBlock(BlockInfoStriped newBlock) {
if (this.blocks == null) {
this.setBlocks(new BlockInfoStriped[]{newBlock});
} else {
int size = this.blocks.length;
BlockInfoStriped[] newlist = new BlockInfoStriped[size + 1];
System.arraycopy(this.blocks, 0, newlist, 0, size);
newlist[size] = newBlock;
this.setBlocks(newlist);
}
}
boolean removeLastBlock(Block oldblock) {
if (blocks == null || blocks.length == 0) {
return false;
}
int newSize = blocks.length - 1;
if (!blocks[newSize].equals(oldblock)) {
return false;
}
//copy to a new list
BlockInfoStriped[] newlist = new BlockInfoStriped[newSize];
System.arraycopy(blocks, 0, newlist, 0, newSize);
setBlocks(newlist);
return true;
}
void truncateStripedBlocks(int n) {
final BlockInfoStriped[] newBlocks;
if (n == 0) {
newBlocks = new BlockInfoStriped[0];
} else {
newBlocks = new BlockInfoStriped[n];
System.arraycopy(getBlocks(), 0, newBlocks, 0, n);
}
// set new blocks
setBlocks(newBlocks);
}
void clear() {
this.blocks = null;
}
}
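The feature keeps its striped blocks in a plain array and grows or shrinks it by copying, mirroring the bookkeeping INodeFile already does for contiguous blocks. A minimal generic sketch of that copy-on-change array management; T merely stands in for BlockInfoStriped:

import java.util.Arrays;

// Illustrative only: generic version of the add/removeLast/truncate
// bookkeeping used by the striped-blocks feature.
class CopyOnChangeList<T> {
  private Object[] items = new Object[0];

  void add(T item) {
    items = Arrays.copyOf(items, items.length + 1);
    items[items.length - 1] = item;
  }

  @SuppressWarnings("unchecked")
  T removeLast() {
    if (items.length == 0) {
      return null;
    }
    T last = (T) items[items.length - 1];
    items = Arrays.copyOf(items, items.length - 1);
    return last;
  }

  void truncateTo(int n) {
    items = Arrays.copyOf(items, n);
  }

  int size() {
    return items.length;
  }
}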


@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION;
import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID; import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID;
import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.NO_SNAPSHOT_ID; import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.NO_SNAPSHOT_ID;
@ -37,12 +38,12 @@ import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection; import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff; import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList; import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature; import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;
@ -174,6 +175,31 @@ public class INodeFile extends INodeWithAdditionalFields
&& getXAttrFeature() == other.getXAttrFeature(); && getXAttrFeature() == other.getXAttrFeature();
} }
/* Start of StripedBlock Feature */
public final FileWithStripedBlocksFeature getStripedBlocksFeature() {
return getFeature(FileWithStripedBlocksFeature.class);
}
public FileWithStripedBlocksFeature addStripedBlocksFeature() {
assert blocks == null || blocks.length == 0:
"The file contains contiguous blocks";
assert !isWithStripedBlocks();
this.setFileReplication((short) 0);
FileWithStripedBlocksFeature sb = new FileWithStripedBlocksFeature();
addFeature(sb);
return sb;
}
public boolean isWithStripedBlocks() {
return getStripedBlocksFeature() != null;
}
/** Used to make sure there is no contiguous block related info */
private boolean hasNoContiguousBlock() {
return (blocks == null || blocks.length == 0) && getFileReplication() == 0;
}
/* Start of Under-Construction Feature */ /* Start of Under-Construction Feature */
/** /**
@ -208,7 +234,7 @@ public class INodeFile extends INodeWithAdditionalFields
"file is no longer under construction"); "file is no longer under construction");
FileUnderConstructionFeature uc = getFileUnderConstructionFeature(); FileUnderConstructionFeature uc = getFileUnderConstructionFeature();
if (uc != null) { if (uc != null) {
assertAllBlocksComplete(); assertAllBlocksComplete(getBlocks());
removeFeature(uc); removeFeature(uc);
this.setModificationTime(mtime); this.setModificationTime(mtime);
} }
@ -216,37 +242,56 @@ public class INodeFile extends INodeWithAdditionalFields
} }
/** Assert all blocks are complete. */ /** Assert all blocks are complete. */
private void assertAllBlocksComplete() { private void assertAllBlocksComplete(BlockInfo[] blks) {
if (blocks == null) { if (blks == null) {
return; return;
} }
for (int i = 0; i < blocks.length; i++) { for (int i = 0; i < blks.length; i++) {
Preconditions.checkState(blocks[i].isComplete(), "Failed to finalize" Preconditions.checkState(blks[i].isComplete(), "Failed to finalize"
+ " %s %s since blocks[%s] is non-complete, where blocks=%s.", + " %s %s since blocks[%s] is non-complete, where blocks=%s.",
getClass().getSimpleName(), this, i, Arrays.asList(blocks)); getClass().getSimpleName(), this, i, Arrays.asList(blks));
} }
} }
/**
* This function does not add a new block; it is typically called while
* loading the fsimage or when converting the last block to or from the
* under-construction state.
*/
@Override // BlockCollection @Override // BlockCollection
public void setBlock(int index, BlockInfoContiguous blk) { public void setBlock(int index, BlockInfo blk) {
this.blocks[index] = blk; FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
if (sb == null) {
assert blk instanceof BlockInfoContiguous;
this.blocks[index] = (BlockInfoContiguous) blk;
} else {
assert blk instanceof BlockInfoStriped;
assert hasNoContiguousBlock();
sb.setBlock(index, (BlockInfoStriped) blk);
}
} }
@Override // BlockCollection, the file should be under construction @Override // BlockCollection, the file should be under construction
public BlockInfoContiguousUnderConstruction setLastBlock( public void convertLastBlockToUC(BlockInfo lastBlock,
BlockInfoContiguous lastBlock, DatanodeStorageInfo[] locations) DatanodeStorageInfo[] locations) throws IOException {
throws IOException {
Preconditions.checkState(isUnderConstruction(), Preconditions.checkState(isUnderConstruction(),
"file is no longer under construction"); "file is no longer under construction");
if (numBlocks() == 0) { if (numBlocks() == 0) {
throw new IOException("Failed to set last block: File is empty."); throw new IOException("Failed to set last block: File is empty.");
} }
BlockInfoContiguousUnderConstruction ucBlock =
lastBlock.convertToBlockUnderConstruction( final BlockInfo ucBlock;
BlockUCState.UNDER_CONSTRUCTION, locations); FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
if (sb == null) {
assert lastBlock instanceof BlockInfoContiguous;
ucBlock = ((BlockInfoContiguous) lastBlock)
.convertToBlockUnderConstruction(UNDER_CONSTRUCTION, locations);
} else {
assert hasNoContiguousBlock();
assert lastBlock instanceof BlockInfoStriped;
ucBlock = ((BlockInfoStriped) lastBlock)
.convertToBlockUnderConstruction(UNDER_CONSTRUCTION, locations);
}
setBlock(numBlocks() - 1, ucBlock); setBlock(numBlocks() - 1, ucBlock);
return ucBlock;
} }
/** /**
@ -256,21 +301,27 @@ public class INodeFile extends INodeWithAdditionalFields
BlockInfoContiguousUnderConstruction removeLastBlock(Block oldblock) { BlockInfoContiguousUnderConstruction removeLastBlock(Block oldblock) {
Preconditions.checkState(isUnderConstruction(), Preconditions.checkState(isUnderConstruction(),
"file is no longer under construction"); "file is no longer under construction");
if (blocks == null || blocks.length == 0) { FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
return null; if (sb == null) {
} if (blocks == null || blocks.length == 0) {
int size_1 = blocks.length - 1; return null;
if (!blocks[size_1].equals(oldblock)) { }
return null; int size_1 = blocks.length - 1;
} if (!blocks[size_1].equals(oldblock)) {
return null;
}
BlockInfoContiguousUnderConstruction uc = BlockInfoContiguousUnderConstruction uc =
(BlockInfoContiguousUnderConstruction)blocks[size_1]; (BlockInfoContiguousUnderConstruction)blocks[size_1];
//copy to a new list //copy to a new list
BlockInfoContiguous[] newlist = new BlockInfoContiguous[size_1]; BlockInfoContiguous[] newlist = new BlockInfoContiguous[size_1];
System.arraycopy(blocks, 0, newlist, 0, size_1); System.arraycopy(blocks, 0, newlist, 0, size_1);
setBlocks(newlist); setContiguousBlocks(newlist);
return uc; return uc;
} else {
assert hasNoContiguousBlock();
return null;
}
} }
/* End of Under-Construction Feature */ /* End of Under-Construction Feature */
@ -371,13 +422,15 @@ public class INodeFile extends INodeWithAdditionalFields
} }
/** Set the replication factor of this file. */ /** Set the replication factor of this file. */
public final void setFileReplication(short replication) { private void setFileReplication(short replication) {
header = HeaderFormat.REPLICATION.BITS.combine(replication, header); header = HeaderFormat.REPLICATION.BITS.combine(replication, header);
} }
/** Set the replication factor of this file. */ /** Set the replication factor of this file. */
public final INodeFile setFileReplication(short replication, public final INodeFile setFileReplication(short replication,
int latestSnapshotId) throws QuotaExceededException { int latestSnapshotId) throws QuotaExceededException {
Preconditions.checkState(!isWithStripedBlocks(),
"Cannot set replication to a file with striped blocks");
recordModification(latestSnapshotId); recordModification(latestSnapshotId);
setFileReplication(replication); setFileReplication(replication);
return this; return this;
@ -415,37 +468,57 @@ public class INodeFile extends INodeWithAdditionalFields
setStoragePolicyID(storagePolicyId); setStoragePolicyID(storagePolicyId);
} }
@Override @Override // INodeFileAttributes
public long getHeaderLong() { public long getHeaderLong() {
return header; return header;
} }
/** @return the blocks of the file. */ /** @return the blocks of the file. */
@Override @Override // BlockCollection
public BlockInfoContiguous[] getBlocks() { public BlockInfo[] getBlocks() {
FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
if (sb != null) {
assert hasNoContiguousBlock();
return sb.getBlocks();
} else {
return this.blocks;
}
}
/** Used by snapshot diff */
public BlockInfoContiguous[] getContiguousBlocks() {
return this.blocks; return this.blocks;
} }
/** @return blocks of the file corresponding to the snapshot. */ /** @return blocks of the file corresponding to the snapshot. */
public BlockInfoContiguous[] getBlocks(int snapshot) { public BlockInfo[] getBlocks(int snapshot) {
if(snapshot == CURRENT_STATE_ID || getDiffs() == null) if (snapshot == CURRENT_STATE_ID || getDiffs() == null) {
return getBlocks(); return getBlocks();
}
// find blocks stored in snapshot diffs (for truncate)
FileDiff diff = getDiffs().getDiffById(snapshot); FileDiff diff = getDiffs().getDiffById(snapshot);
BlockInfoContiguous[] snapshotBlocks = // note that currently FileDiff can only store contiguous blocks
diff == null ? getBlocks() : diff.getBlocks(); BlockInfo[] snapshotBlocks = diff == null ? getBlocks() : diff.getBlocks();
if(snapshotBlocks != null) if (snapshotBlocks != null) {
return snapshotBlocks; return snapshotBlocks;
}
// Blocks are not in the current snapshot // Blocks are not in the current snapshot
// Find next snapshot with blocks present or return current file blocks // Find next snapshot with blocks present or return current file blocks
snapshotBlocks = getDiffs().findLaterSnapshotBlocks(snapshot); snapshotBlocks = getDiffs().findLaterSnapshotBlocks(snapshot);
return (snapshotBlocks == null) ? getBlocks() : snapshotBlocks; return (snapshotBlocks == null) ? getBlocks() : snapshotBlocks;
} }
void updateBlockCollection() { /** Used during concat to update the BlockCollection for each block */
if (blocks != null) { private void updateBlockCollection() {
if (blocks != null && blocks.length > 0) {
for(BlockInfoContiguous b : blocks) { for(BlockInfoContiguous b : blocks) {
b.setBlockCollection(this); b.setBlockCollection(this);
} }
} else {
FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
if (sb != null) {
sb.updateBlockCollection(this);
}
} }
} }
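Throughout INodeFile the working rule is that a file either carries the striped-blocks feature or stores contiguous blocks directly, never both, and every accessor (getBlocks, getLastBlock, numBlocks, setBlock) dispatches on which side it is on. A small stand-in sketch of that dispatch and the invariant it relies on:

// Stand-in types; in INodeFile the invariant is asserted via
// hasNoContiguousBlock() whenever the striped feature is present.
class FileDemoBlock { }
class StripedFeatureDemo { FileDemoBlock[] blocks = new FileDemoBlock[0]; }

class FileDemo {
  FileDemoBlock[] contiguous;       // used when no striped feature is attached
  StripedFeatureDemo striped;       // at most one of the two is populated

  FileDemoBlock[] getBlocks() {
    if (striped != null) {
      assert contiguous == null || contiguous.length == 0;
      return striped.blocks;
    }
    return contiguous;
  }

  int numBlocks() {
    FileDemoBlock[] blks = getBlocks();
    return blks == null ? 0 : blks.length;
  }
}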
@ -468,27 +541,27 @@ public class INodeFile extends INodeWithAdditionalFields
size += in.blocks.length; size += in.blocks.length;
} }
setBlocks(newlist); setContiguousBlocks(newlist);
updateBlockCollection(); updateBlockCollection();
} }
/** /**
* add a block to the block list * add a contiguous block to the block list
*/ */
void addBlock(BlockInfoContiguous newblock) { void addBlock(BlockInfoContiguous newblock) {
if (this.blocks == null) { if (this.blocks == null) {
this.setBlocks(new BlockInfoContiguous[]{newblock}); this.setContiguousBlocks(new BlockInfoContiguous[]{newblock});
} else { } else {
int size = this.blocks.length; int size = this.blocks.length;
BlockInfoContiguous[] newlist = new BlockInfoContiguous[size + 1]; BlockInfoContiguous[] newlist = new BlockInfoContiguous[size + 1];
System.arraycopy(this.blocks, 0, newlist, 0, size); System.arraycopy(this.blocks, 0, newlist, 0, size);
newlist[size] = newblock; newlist[size] = newblock;
this.setBlocks(newlist); this.setContiguousBlocks(newlist);
} }
} }
/** Set the blocks. */ /** Set the blocks. */
public void setBlocks(BlockInfoContiguous[] blocks) { public void setContiguousBlocks(BlockInfoContiguous[] blocks) {
this.blocks = blocks; this.blocks = blocks;
} }
@ -539,13 +612,19 @@ public class INodeFile extends INodeWithAdditionalFields
} }
public void clearFile(ReclaimContext reclaimContext) { public void clearFile(ReclaimContext reclaimContext) {
if (blocks != null && reclaimContext.collectedBlocks != null) { BlockInfo[] blks = getBlocks();
for (BlockInfoContiguous blk : blocks) { if (blks != null && reclaimContext.collectedBlocks != null) {
for (BlockInfo blk : blks) {
reclaimContext.collectedBlocks.addDeleteBlock(blk); reclaimContext.collectedBlocks.addDeleteBlock(blk);
blk.setBlockCollection(null); blk.setBlockCollection(null);
} }
} }
setBlocks(null); setContiguousBlocks(null);
FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
if (sb != null) {
sb.clear();
}
if (getAclFeature() != null) { if (getAclFeature() != null) {
AclStorage.removeAclFeature(getAclFeature()); AclStorage.removeAclFeature(getAclFeature());
} }
@ -712,13 +791,27 @@ public class INodeFile extends INodeWithAdditionalFields
*/ */
public final QuotaCounts storagespaceConsumed(BlockStoragePolicy bsp) { public final QuotaCounts storagespaceConsumed(BlockStoragePolicy bsp) {
QuotaCounts counts = new QuotaCounts.Builder().build(); QuotaCounts counts = new QuotaCounts.Builder().build();
final Iterable<BlockInfoContiguous> blocks; if (isStriped()) {
return storagespaceConsumedWithStriped(bsp);
} else {
return storagespaceConsumedWithReplication(bsp);
}
}
public final QuotaCounts storagespaceConsumedWithStriped(
BlockStoragePolicy bsp) {
return null;
}
public final QuotaCounts storagespaceConsumedWithReplication(
BlockStoragePolicy bsp) { QuotaCounts counts = new QuotaCounts.Builder().build();
final Iterable<BlockInfo> blocks;
FileWithSnapshotFeature sf = getFileWithSnapshotFeature(); FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
if (sf == null) { if (sf == null) {
blocks = Arrays.asList(getBlocks()); blocks = Arrays.asList(getBlocks());
} else { } else {
// Collect all distinct blocks // Collect all distinct blocks
Set<BlockInfoContiguous> allBlocks = new HashSet<>(Arrays.asList(getBlocks())); Set<BlockInfo> allBlocks = new HashSet<>(Arrays.asList(getBlocks()));
List<FileDiff> diffs = sf.getDiffs().asList(); List<FileDiff> diffs = sf.getDiffs().asList();
for(FileDiff diff : diffs) { for(FileDiff diff : diffs) {
BlockInfoContiguous[] diffBlocks = diff.getBlocks(); BlockInfoContiguous[] diffBlocks = diff.getBlocks();
@ -730,7 +823,7 @@ public class INodeFile extends INodeWithAdditionalFields
} }
final short replication = getPreferredBlockReplication(); final short replication = getPreferredBlockReplication();
for (BlockInfoContiguous b : blocks) { for (BlockInfo b : blocks) {
long blockSize = b.isComplete() ? b.getNumBytes() : long blockSize = b.isComplete() ? b.getNumBytes() :
getPreferredBlockSize(); getPreferredBlockSize();
counts.addStorageSpace(blockSize * replication); counts.addStorageSpace(blockSize * replication);
@ -746,24 +839,44 @@ public class INodeFile extends INodeWithAdditionalFields
return counts; return counts;
} }
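storagespaceConsumed now branches by layout; the replicated path above charges each block at blockSize * replication, while the striped path is left as a stub in this patch (it returns null). Purely as a hedged illustration of the kind of accounting a striped implementation will need, and not something this patch provides, raw usage for a striped file is roughly its logical length scaled by (data + parity) / data; the 6+3 schema and 64 KB cell size below are assumptions for the example:

// Hedged illustration only; this patch does not implement striped accounting.
class StripedSpaceDemo {
  static long rawSpace(long logicalBytes, int dataNum, int parityNum, int cellSize) {
    long fullStripeBytes = (long) dataNum * cellSize;
    long fullStripes = logicalBytes / fullStripeBytes;
    // Full stripes consume their data cells plus the matching parity cells.
    long raw = fullStripes * fullStripeBytes / dataNum * (dataNum + parityNum);
    long remainder = logicalBytes % fullStripeBytes;
    if (remainder > 0) {
      // Approximate the final partial stripe by charging parity for it too.
      raw += remainder + (long) parityNum * Math.min(remainder, cellSize);
    }
    return raw;
  }

  public static void main(String[] args) {
    // 1 GiB of logical data in a (6, 3) schema: roughly 1.5 GiB of raw space.
    System.out.println(rawSpace(1L << 30, 6, 3, 64 * 1024));
  }
}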
public final short getReplication(int lastSnapshotId) {
if (lastSnapshotId != CURRENT_STATE_ID) {
return getFileReplication(lastSnapshotId);
} else {
return getBlockReplication();
}
}
/** /**
* Return the penultimate allocated block for this file. * Return the penultimate allocated block for this file.
*/ */
BlockInfoContiguous getPenultimateBlock() { BlockInfo getPenultimateBlock() {
if (blocks == null || blocks.length <= 1) { BlockInfo[] blks = getBlocks();
return null; return (blks == null || blks.length <= 1) ?
} null : blks[blks.length - 2];
return blocks[blocks.length - 2];
} }
@Override @Override
public BlockInfoContiguous getLastBlock() { public BlockInfo getLastBlock() {
return blocks == null || blocks.length == 0? null: blocks[blocks.length-1]; FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
if (sb == null) {
return blocks == null || blocks.length == 0 ?
null : blocks[blocks.length - 1];
} else {
assert hasNoContiguousBlock();
return sb.getLastBlock();
}
} }
@Override @Override
public int numBlocks() { public int numBlocks() {
return blocks == null ? 0 : blocks.length; FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
if (sb == null) {
return blocks == null ? 0 : blocks.length;
} else {
assert hasNoContiguousBlock();
return sb.numBlocks();
}
} }
@VisibleForTesting @VisibleForTesting
@ -775,6 +888,7 @@ public class INodeFile extends INodeWithAdditionalFields
// only compare the first block // only compare the first block
out.print(", blocks="); out.print(", blocks=");
out.print(blocks == null || blocks.length == 0? null: blocks[0]); out.print(blocks == null || blocks.length == 0? null: blocks[0]);
// TODO print striped blocks
out.println(); out.println();
} }
@ -784,9 +898,10 @@ public class INodeFile extends INodeWithAdditionalFields
*/ */
public long collectBlocksBeyondMax(final long max, public long collectBlocksBeyondMax(final long max,
final BlocksMapUpdateInfo collectedBlocks) { final BlocksMapUpdateInfo collectedBlocks) {
final BlockInfoContiguous[] oldBlocks = getBlocks(); final BlockInfo[] oldBlocks = getBlocks();
if (oldBlocks == null) if (oldBlocks == null) {
return 0; return 0;
}
// find the minimum n such that the size of the first n blocks > max // find the minimum n such that the size of the first n blocks > max
int n = 0; int n = 0;
long size = 0; long size = 0;
@ -865,21 +980,36 @@ public class INodeFile extends INodeWithAdditionalFields
} }
void truncateBlocksTo(int n) { void truncateBlocksTo(int n) {
FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
if (sb == null) {
truncateContiguousBlocks(n);
} else {
sb.truncateStripedBlocks(n);
}
}
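truncateBlocksTo now dispatches by layout, and the snapshot-related path further down only ever deals with contiguous blocks. The reconciliation it performs follows a simple pattern: keep the prefix of blocks shared with the snapshot and collect everything after it for deletion. A generic, self-contained sketch of that pattern (identity comparison is intentional, matching the block-object sharing in the namespace):

import java.util.ArrayList;
import java.util.List;

// Generic sketch of "keep the common prefix, collect the rest", as used when
// reconciling the file's block list with a snapshot's block list.
class PrefixTruncateDemo {
  static <T> List<T> collectBeyondCommonPrefix(T[] current, T[] snapshot) {
    int n = 0;
    while (n < current.length && n < snapshot.length && current[n] == snapshot[n]) {
      n++;
    }
    List<T> toDelete = new ArrayList<T>();
    for (int i = n; i < current.length; i++) {
      toDelete.add(current[i]);
    }
    return toDelete;
  }
}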
private void truncateContiguousBlocks(int n) {
final BlockInfoContiguous[] newBlocks; final BlockInfoContiguous[] newBlocks;
if (n == 0) { if (n == 0) {
newBlocks = BlockInfoContiguous.EMPTY_ARRAY; newBlocks = BlockInfoContiguous.EMPTY_ARRAY;
} else { } else {
newBlocks = new BlockInfoContiguous[n]; newBlocks = new BlockInfoContiguous[n];
System.arraycopy(getBlocks(), 0, newBlocks, 0, n); System.arraycopy(blocks, 0, newBlocks, 0, n);
} }
// set new blocks // set new blocks
setBlocks(newBlocks); setContiguousBlocks(newBlocks);
} }
/**
* This function is only called when the block list is stored in snapshot
* diffs, which can only happen when a file is truncated while snapshots
* exist. Since truncation is not supported for striped blocks, only
* contiguous blocks need to be handled here.
*/
public void collectBlocksBeyondSnapshot(BlockInfoContiguous[] snapshotBlocks, public void collectBlocksBeyondSnapshot(BlockInfoContiguous[] snapshotBlocks,
BlocksMapUpdateInfo collectedBlocks) { BlocksMapUpdateInfo collectedBlocks) {
BlockInfoContiguous[] oldBlocks = getBlocks(); BlockInfoContiguous[] oldBlocks = this.blocks;
if(snapshotBlocks == null || oldBlocks == null) if (snapshotBlocks == null || oldBlocks == null)
return; return;
// Skip blocks in common between the file and the snapshot // Skip blocks in common between the file and the snapshot
int n = 0; int n = 0;
@ -887,7 +1017,7 @@ public class INodeFile extends INodeWithAdditionalFields
oldBlocks[n] == snapshotBlocks[n]) { oldBlocks[n] == snapshotBlocks[n]) {
n++; n++;
} }
truncateBlocksTo(n); truncateContiguousBlocks(n);
// Collect the remaining blocks of the file // Collect the remaining blocks of the file
while(n < oldBlocks.length) { while(n < oldBlocks.length) {
collectedBlocks.addDeleteBlock(oldBlocks[n++]); collectedBlocks.addDeleteBlock(oldBlocks[n++]);


@ -33,7 +33,7 @@ import java.util.TreeMap;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.Daemon;
@ -108,15 +108,15 @@ public class LeaseManager {
for (Long id : getINodeIdWithLeases()) { for (Long id : getINodeIdWithLeases()) {
final INodeFile cons = fsnamesystem.getFSDirectory().getInode(id).asFile(); final INodeFile cons = fsnamesystem.getFSDirectory().getInode(id).asFile();
Preconditions.checkState(cons.isUnderConstruction()); Preconditions.checkState(cons.isUnderConstruction());
BlockInfoContiguous[] blocks = cons.getBlocks(); BlockInfo[] blocks = cons.getBlocks();
if(blocks == null) { if(blocks == null) {
continue; continue;
} }
for(BlockInfoContiguous b : blocks) { for(BlockInfo b : blocks) {
if(!b.isComplete()) if(!b.isComplete())
numUCBlocks++; numUCBlocks++;
}
} }
}
LOG.info("Number of blocks under construction: " + numUCBlocks); LOG.info("Number of blocks under construction: " + numUCBlocks);
return numUCBlocks; return numUCBlocks;
} }


@ -64,6 +64,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactor
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection; import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
@ -243,8 +244,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
//get blockInfo //get blockInfo
Block block = new Block(Block.getBlockId(blockId)); Block block = new Block(Block.getBlockId(blockId));
//find which file this block belongs to //find which file this block belongs to
BlockInfoContiguous blockInfo = namenode.getNamesystem() BlockInfo blockInfo = namenode.getNamesystem().getStoredBlock(block);
.getStoredBlock(block);
if(blockInfo == null) { if(blockInfo == null) {
out.println("Block "+ blockId +" " + NONEXISTENT_STATUS); out.println("Block "+ blockId +" " + NONEXISTENT_STATUS);
LOG.warn("Block "+ blockId + " " + NONEXISTENT_STATUS); LOG.warn("Block "+ blockId + " " + NONEXISTENT_STATUS);


@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory; import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
import org.apache.hadoop.hdfs.util.RwLock; import org.apache.hadoop.hdfs.util.RwLock;
@ -45,5 +46,5 @@ public interface Namesystem extends RwLock, SafeMode {
public void checkOperation(OperationCategory read) throws StandbyException; public void checkOperation(OperationCategory read) throws StandbyException;
public boolean isInSnapshot(BlockInfoContiguousUnderConstruction blockUC); public boolean isInSnapshot(BlockCollection bc);
} }


@ -239,15 +239,16 @@ public class FSImageFormatPBSnapshot {
FileDiff diff = new FileDiff(pbf.getSnapshotId(), copy, null, FileDiff diff = new FileDiff(pbf.getSnapshotId(), copy, null,
pbf.getFileSize()); pbf.getFileSize());
List<BlockProto> bpl = pbf.getBlocksList(); List<BlockProto> bpl = pbf.getBlocksList();
// TODO: also persist striped blocks // a file diff can only contain contiguous blocks
BlockInfoContiguous[] blocks = new BlockInfoContiguous[bpl.size()]; BlockInfoContiguous[] blocks = new BlockInfoContiguous[bpl.size()];
for(int j = 0, e = bpl.size(); j < e; ++j) { for(int j = 0, e = bpl.size(); j < e; ++j) {
Block blk = PBHelper.convert(bpl.get(j)); Block blk = PBHelper.convert(bpl.get(j));
BlockInfoContiguous storedBlock = BlockInfoContiguous storedBlock =
(BlockInfoContiguous) fsn.getBlockManager().getStoredBlock(blk); (BlockInfoContiguous) fsn.getBlockManager().getStoredBlock(blk);
if(storedBlock == null) { if(storedBlock == null) {
storedBlock = fsn.getBlockManager().addBlockCollection( storedBlock = (BlockInfoContiguous) fsn.getBlockManager()
new BlockInfoContiguous(blk, copy.getFileReplication()), file); .addBlockCollection(new BlockInfoContiguous(blk,
copy.getFileReplication()), file);
} }
blocks[j] = storedBlock; blocks[j] = storedBlock;
} }
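Note on the casts above: getStoredBlock() and addBlockCollection() now traffic in the BlockInfo supertype, so the snapshot loader downcasts because file diffs only ever reference contiguous blocks. As a hedged illustration only (not code from this patch; the class name is hypothetical), a caller holding a BlockInfo can distinguish the two concrete kinds with the accessors this commit introduces on BlockInfoStriped:

import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;

public class BlockKindSketch {
  /** Describe whether a stored block is striped (erasure-coded) or contiguous. */
  static String describe(BlockInfo b) {
    if (b instanceof BlockInfoStriped) {
      BlockInfoStriped striped = (BlockInfoStriped) b;
      return "striped: " + striped.getDataBlockNum() + " data + "
          + striped.getParityBlockNum() + " parity blocks";
    }
    // BlockInfoContiguous (including its under-construction subclass) falls through here.
    return "contiguous, replication-based";
  }
}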
@@ -55,7 +55,9 @@ public class FileDiffList extends
     final FileDiff diff =
         super.saveSelf2Snapshot(latestSnapshotId, iNodeFile, snapshotCopy);
     if (withBlocks) {  // Store blocks if this is the first update
-      diff.setBlocks(iNodeFile.getBlocks());
+      BlockInfoContiguous[] blks = iNodeFile.getContiguousBlocks();
+      assert blks != null;
+      diff.setBlocks(blks);
     }
   }
@@ -118,7 +120,7 @@ public class FileDiffList extends
         (earlierDiff == null ? new BlockInfoContiguous[]{} : earlierDiff.getBlocks());
     // Find later snapshot (or file itself) with blocks
     BlockInfoContiguous[] laterBlocks = findLaterSnapshotBlocks(removed.getSnapshotId());
-    laterBlocks = (laterBlocks==null) ? file.getBlocks() : laterBlocks;
+    laterBlocks = (laterBlocks == null) ? file.getContiguousBlocks() : laterBlocks;
     // Skip blocks, which belong to either the earlier or the later lists
     int i = 0;
     for(; i < removedBlocks.length; i++) {
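The switch to getContiguousBlocks() above reflects that snapshot file diffs only record contiguous (replicated) blocks. The sketch below is a hypothetical illustration of the kind of filtering such a helper implies; it is not the actual INodeFile implementation, and the method name contiguousView is invented for the example:

import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;

public class ContiguousViewSketch {
  /**
   * Return the blocks as a contiguous-typed array, or null if any block is
   * not contiguous (e.g. the file is striped). Callers such as file diffs
   * expect a non-null result, hence the assert added in this change.
   */
  static BlockInfoContiguous[] contiguousView(BlockInfo[] blocks) {
    BlockInfoContiguous[] result = new BlockInfoContiguous[blocks.length];
    for (int i = 0; i < blocks.length; i++) {
      if (!(blocks[i] instanceof BlockInfoContiguous)) {
        return null;  // striped blocks must not be recorded in a file diff
      }
      result[i] = (BlockInfoContiguous) blocks[i];
    }
    return result;
  }
}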
@@ -91,6 +91,10 @@ message INodeSection {
     optional string clientMachine = 2;
   }
+  message StripedBlocksFeature {
+    repeated StripedBlockProto blocks = 1;
+  }
   message AclFeatureProto {
     /**
      * An ACL entry is represented by a 32-bit integer in Big Endian
@@ -139,6 +143,7 @@ message INodeSection {
     optional AclFeatureProto acl = 8;
     optional XAttrFeatureProto xAttrs = 9;
     optional uint32 storagePolicyID = 10;
+    optional StripedBlocksFeature stripedBlocks = 11;
   }
   message QuotaByStorageTypeEntryProto {
@@ -490,6 +490,16 @@ message BlockProto {
   optional uint64 numBytes = 3 [default = 0];
 }
+/**
+ * Striped block information. Besides the basic information for a block,
+ * it also contains the number of data/parity blocks.
+ */
+message StripedBlockProto {
+  required BlockProto block = 1;
+  optional uint32 dataBlockNum = 2;
+  optional uint32 parityBlockNum = 3;
+}
 /**
  * Block and datanodes where is it located
  */
@@ -108,6 +108,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -1609,7 +1610,7 @@ public class DFSTestUtil {
   public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
       ExtendedBlock blk) {
     FSNamesystem fsn = nn.getNamesystem();
-    BlockInfoContiguous storedBlock = fsn.getStoredBlock(blk.getLocalBlock());
+    BlockInfo storedBlock = fsn.getStoredBlock(blk.getLocalBlock());
     assertTrue("Block " + blk + " should be under construction, " +
         "got: " + storedBlock,
         storedBlock instanceof BlockInfoContiguousUnderConstruction);
@@ -1246,8 +1246,8 @@ public class TestReplicationPolicy {
         (DatanodeStorageInfo.AddBlockResult.ADDED);
     ucBlock.addStorage(storage, ucBlock);
-    when(mbc.setLastBlock((BlockInfoContiguous) any(), (DatanodeStorageInfo[]) any()))
-        .thenReturn(ucBlock);
+    BlockInfo lastBlk = mbc.getLastBlock();
+    when(mbc.getLastBlock()).thenReturn(lastBlk, ucBlock);
     bm.convertLastBlockToUnderConstruction(mbc, 0L);
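The rewritten stub relies on Mockito's consecutive-return behaviour: when(...).thenReturn(a, b) yields a on the first call and b on every later call, so the mocked collection first reports its existing last block and then the under-construction replacement. A self-contained sketch of that mechanism (the class and stubbed values are illustrative, not from the test):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.List;

public class ConsecutiveStubSketch {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    List<String> mocked = (List<String>) mock(List.class);
    // First call sees the first stubbed value, every later call the second.
    when(mocked.get(0)).thenReturn("existing last block", "under-construction block");

    System.out.println(mocked.get(0));  // existing last block
    System.out.println(mocked.get(0));  // under-construction block
    System.out.println(mocked.get(0));  // under-construction block (last value repeats)
  }
}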
@@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.junit.After;
 import org.junit.Before;
@@ -87,21 +87,21 @@ public class TestAddBlock {
       // check file1
       INodeFile file1Node = fsdir.getINode4Write(file1.toString()).asFile();
-      BlockInfoContiguous[] file1Blocks = file1Node.getBlocks();
+      BlockInfo[] file1Blocks = file1Node.getBlocks();
       assertEquals(1, file1Blocks.length);
       assertEquals(BLOCKSIZE - 1, file1Blocks[0].getNumBytes());
       assertEquals(BlockUCState.COMPLETE, file1Blocks[0].getBlockUCState());
       // check file2
       INodeFile file2Node = fsdir.getINode4Write(file2.toString()).asFile();
-      BlockInfoContiguous[] file2Blocks = file2Node.getBlocks();
+      BlockInfo[] file2Blocks = file2Node.getBlocks();
       assertEquals(1, file2Blocks.length);
       assertEquals(BLOCKSIZE, file2Blocks[0].getNumBytes());
       assertEquals(BlockUCState.COMPLETE, file2Blocks[0].getBlockUCState());
       // check file3
       INodeFile file3Node = fsdir.getINode4Write(file3.toString()).asFile();
-      BlockInfoContiguous[] file3Blocks = file3Node.getBlocks();
+      BlockInfo[] file3Blocks = file3Node.getBlocks();
       assertEquals(2, file3Blocks.length);
       assertEquals(BLOCKSIZE, file3Blocks[0].getNumBytes());
       assertEquals(BlockUCState.COMPLETE, file3Blocks[0].getBlockUCState());
@@ -110,7 +110,7 @@ public class TestAddBlock {
       // check file4
       INodeFile file4Node = fsdir.getINode4Write(file4.toString()).asFile();
-      BlockInfoContiguous[] file4Blocks = file4Node.getBlocks();
+      BlockInfo[] file4Blocks = file4Node.getBlocks();
       assertEquals(2, file4Blocks.length);
       assertEquals(BLOCKSIZE, file4Blocks[0].getNumBytes());
       assertEquals(BlockUCState.COMPLETE, file4Blocks[0].getBlockUCState());
@@ -141,7 +141,7 @@ public class TestAddBlock {
       FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
       INodeFile fileNode = fsdir.getINode4Write(file1.toString()).asFile();
-      BlockInfoContiguous[] fileBlocks = fileNode.getBlocks();
+      BlockInfo[] fileBlocks = fileNode.getBlocks();
       assertEquals(2, fileBlocks.length);
       assertEquals(BLOCKSIZE, fileBlocks[0].getNumBytes());
       assertEquals(BlockUCState.COMPLETE, fileBlocks[0].getBlockUCState());
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.junit.After;
 import org.junit.Before;
@@ -75,7 +76,7 @@ public class TestAddBlockgroup {
     final Path file1 = new Path("/file1");
     DFSTestUtil.createFile(fs, file1, BLOCKSIZE * 2, REPLICATION, 0L);
     INodeFile file1Node = fsdir.getINode4Write(file1.toString()).asFile();
-    BlockInfoContiguous[] file1Blocks = file1Node.getBlocks();
+    BlockInfo[] file1Blocks = file1Node.getBlocks();
     assertEquals(2, file1Blocks.length);
     assertEquals(GROUP_SIZE, file1Blocks[0].numNodes());
     assertEquals(HdfsConstants.MAX_BLOCKS_IN_GROUP,
@@ -35,7 +35,7 @@ import org.apache.hadoop.hdfs.TestFileCreation;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@@ -91,12 +91,12 @@ public class TestBlockUnderConstruction {
         " isUnderConstruction = " + inode.isUnderConstruction() +
         " expected to be " + isFileOpen,
         inode.isUnderConstruction() == isFileOpen);
-    BlockInfoContiguous[] blocks = inode.getBlocks();
+    BlockInfo[] blocks = inode.getBlocks();
     assertTrue("File does not have blocks: " + inode.toString(),
         blocks != null && blocks.length > 0);
     int idx = 0;
-    BlockInfoContiguous curBlock;
+    BlockInfo curBlock;
     // all blocks but the last two should be regular blocks
     for(; idx < blocks.length - 2; idx++) {
       curBlock = blocks[idx];
@@ -24,6 +24,7 @@ import java.io.File;
 import java.io.IOException;
 import java.util.EnumSet;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.junit.Assert;
 import org.apache.hadoop.conf.Configuration;
@@ -39,7 +40,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
@@ -105,7 +105,7 @@ public class TestFSImage {
     INodeFile file2Node = fsn.dir.getINode4Write(file2.toString()).asFile();
     assertEquals("hello".length(), file2Node.computeFileSize());
     assertTrue(file2Node.isUnderConstruction());
-    BlockInfoContiguous[] blks = file2Node.getBlocks();
+    BlockInfo[] blks = file2Node.getBlocks();
     assertEquals(1, blks.length);
     assertEquals(BlockUCState.UNDER_CONSTRUCTION, blks[0].getBlockUCState());
     // check lease manager
@@ -54,6 +54,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@@ -1035,7 +1036,8 @@ public class TestFileTruncate {
     iip = fsn.getFSDirectory().getINodesInPath(src, true);
     file = iip.getLastINode().asFile();
     file.recordModification(iip.getLatestSnapshotId(), true);
-    assertThat(file.isBlockInLatestSnapshot(file.getLastBlock()), is(true));
+    assertThat(file.isBlockInLatestSnapshot(
+        (BlockInfoContiguous) file.getLastBlock()), is(true));
     initialGenStamp = file.getLastBlock().getGenerationStamp();
     // Test that prepareFileForTruncate sets up copy-on-write truncate
     fsn.writeLock();
@@ -79,7 +79,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
@@ -885,7 +885,7 @@ public class TestFsck {
     // intentionally corrupt NN data structure
     INodeFile node = (INodeFile) cluster.getNamesystem().dir.getINode
         (fileName, true);
-    final BlockInfoContiguous[] blocks = node.getBlocks();
+    final BlockInfo[] blocks = node.getBlocks();
     assertEquals(blocks.length, 1);
     blocks[0].setNumBytes(-1L); // set the block length to be negative
@@ -36,7 +36,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -108,14 +108,14 @@ public class TestSnapshotBlocksMap {
       final FSDirectory dir, final BlockManager blkManager) throws Exception {
     final INodeFile file = INodeFile.valueOf(dir.getINode(path), path);
     assertEquals(numBlocks, file.getBlocks().length);
-    for(BlockInfoContiguous b : file.getBlocks()) {
+    for(BlockInfo b : file.getBlocks()) {
       assertBlockCollection(blkManager, file, b);
     }
     return file;
   }
   static void assertBlockCollection(final BlockManager blkManager,
-      final INodeFile file, final BlockInfoContiguous b) {
+      final INodeFile file, final BlockInfo b) {
     Assert.assertSame(b, blkManager.getStoredBlock(b));
     Assert.assertSame(file, blkManager.getBlockCollection(b));
     Assert.assertSame(file, b.getBlockCollection());
@@ -146,10 +146,10 @@ public class TestSnapshotBlocksMap {
     {
       final INodeFile f2 = assertBlockCollection(file2.toString(), 3, fsdir,
           blockmanager);
-      BlockInfoContiguous[] blocks = f2.getBlocks();
+      BlockInfo[] blocks = f2.getBlocks();
       hdfs.delete(sub2, true);
       // The INode should have been removed from the blocksMap
-      for(BlockInfoContiguous b : blocks) {
+      for(BlockInfo b : blocks) {
         assertNull(blockmanager.getBlockCollection(b));
       }
     }
@@ -177,7 +177,7 @@ public class TestSnapshotBlocksMap {
     // Check the block information for file0
     final INodeFile f0 = assertBlockCollection(file0.toString(), 4, fsdir,
         blockmanager);
-    BlockInfoContiguous[] blocks0 = f0.getBlocks();
+    BlockInfo[] blocks0 = f0.getBlocks();
     // Also check the block information for snapshot of file0
     Path snapshotFile0 = SnapshotTestHelper.getSnapshotPath(sub1, "s0",
@@ -187,7 +187,7 @@ public class TestSnapshotBlocksMap {
     // Delete file0
     hdfs.delete(file0, true);
     // Make sure the blocks of file0 is still in blocksMap
-    for(BlockInfoContiguous b : blocks0) {
+    for(BlockInfo b : blocks0) {
       assertNotNull(blockmanager.getBlockCollection(b));
     }
     assertBlockCollection(snapshotFile0.toString(), 4, fsdir, blockmanager);
@@ -201,7 +201,7 @@ public class TestSnapshotBlocksMap {
     hdfs.deleteSnapshot(sub1, "s1");
     // Make sure the first block of file0 is still in blocksMap
-    for(BlockInfoContiguous b : blocks0) {
+    for(BlockInfo b : blocks0) {
      assertNotNull(blockmanager.getBlockCollection(b));
     }
     assertBlockCollection(snapshotFile0.toString(), 4, fsdir, blockmanager);
@@ -293,7 +293,7 @@ public class TestSnapshotBlocksMap {
       hdfs.append(bar);
       INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
-      BlockInfoContiguous[] blks = barNode.getBlocks();
+      BlockInfo[] blks = barNode.getBlocks();
       assertEquals(1, blks.length);
       assertEquals(BLOCKSIZE, blks[0].getNumBytes());
       ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
@@ -331,7 +331,7 @@ public class TestSnapshotBlocksMap {
       hdfs.append(bar);
       INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
-      BlockInfoContiguous[] blks = barNode.getBlocks();
+      BlockInfo[] blks = barNode.getBlocks();
       assertEquals(1, blks.length);
       ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
       cluster.getNameNodeRpc()
@@ -370,7 +370,7 @@ public class TestSnapshotBlocksMap {
       hdfs.append(bar);
       INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
-      BlockInfoContiguous[] blks = barNode.getBlocks();
+      BlockInfo[] blks = barNode.getBlocks();
       assertEquals(1, blks.length);
       ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
       cluster.getNameNodeRpc()
@@ -421,7 +421,7 @@ public class TestSnapshotBlocksMap {
       out.write(testData);
       out.close();
       INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
-      BlockInfoContiguous[] blks = barNode.getBlocks();
+      BlockInfo[] blks = barNode.getBlocks();
       assertEquals(1, blks.length);
       assertEquals(testData.length, blks[0].getNumBytes());
@@ -42,7 +42,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -262,12 +262,12 @@ public class TestSnapshotDeletion {
     DFSTestUtil.createFile(hdfs, tempFile, BLOCKSIZE, REPLICATION, seed);
     final INodeFile temp = TestSnapshotBlocksMap.assertBlockCollection(
         tempFile.toString(), 1, fsdir, blockmanager);
-    BlockInfoContiguous[] blocks = temp.getBlocks();
+    BlockInfo[] blocks = temp.getBlocks();
     hdfs.delete(tempDir, true);
     // check dir's quota usage
     checkQuotaUsageComputation(dir, 8, BLOCKSIZE * REPLICATION * 3);
     // check blocks of tempFile
-    for (BlockInfoContiguous b : blocks) {
+    for (BlockInfo b : blocks) {
       assertNull(blockmanager.getBlockCollection(b));
     }
@@ -344,7 +344,7 @@ public class TestSnapshotDeletion {
     // while deletion, we add diff for subsub and metaChangeFile1, and remove
     // newFile
     checkQuotaUsageComputation(dir, 9L, BLOCKSIZE * REPLICATION * 4);
-    for (BlockInfoContiguous b : blocks) {
+    for (BlockInfo b : blocks) {
       assertNull(blockmanager.getBlockCollection(b));
     }
@@ -481,7 +481,7 @@ public class TestSnapshotDeletion {
     final INodeFile toDeleteFileNode = TestSnapshotBlocksMap
         .assertBlockCollection(toDeleteFile.toString(), 1, fsdir, blockmanager);
-    BlockInfoContiguous[] blocks = toDeleteFileNode.getBlocks();
+    BlockInfo[] blocks = toDeleteFileNode.getBlocks();
     // create snapshot s0 on dir
     SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
@@ -507,7 +507,7 @@ public class TestSnapshotDeletion {
     // metaChangeDir's diff, dir's diff. diskspace: remove toDeleteFile, and
     // metaChangeFile's replication factor decreases
     checkQuotaUsageComputation(dir, 6, 2 * BLOCKSIZE * REPLICATION - BLOCKSIZE);
-    for (BlockInfoContiguous b : blocks) {
+    for (BlockInfo b : blocks) {
       assertNull(blockmanager.getBlockCollection(b));
     }
@@ -801,7 +801,7 @@ public class TestSnapshotDeletion {
     FileStatus statusBeforeDeletion13 = hdfs.getFileStatus(file13_s1);
     INodeFile file14Node = TestSnapshotBlocksMap.assertBlockCollection(
         file14_s2.toString(), 1, fsdir, blockmanager);
-    BlockInfoContiguous[] blocks_14 = file14Node.getBlocks();
+    BlockInfo[] blocks_14 = file14Node.getBlocks();
     TestSnapshotBlocksMap.assertBlockCollection(file15_s2.toString(), 1, fsdir,
         blockmanager);
@@ -838,7 +838,7 @@ public class TestSnapshotDeletion {
         modDirStr + "file15");
     assertFalse(hdfs.exists(file14_s1));
     assertFalse(hdfs.exists(file15_s1));
-    for (BlockInfoContiguous b : blocks_14) {
+    for (BlockInfo b : blocks_14) {
       assertNull(blockmanager.getBlockCollection(b));
     }