HDFS-7749. Erasure Coding: Add striped block support in INodeFile. Contributed by Jing Zhao.
Parent: dae27f6dd1
Commit: 9f2f583f40
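In outline, the hunks below generalize the NameNode's block bookkeeping from the concrete BlockInfoContiguous type to the common BlockInfo base type (BlockCollection, BlockManager, CacheReplicationMonitor, edit-log and fsimage handling), add a new BlockInfoStripedUnderConstruction class for striped blocks being written, add StripedBlockProto converters to PBHelper, persist striped blocks through a new StripedBlocksFeature in the fsimage, and, for now, reject truncate, append, and concat on files with striped blocks.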
PBHelper.java

@@ -172,6 +172,7 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StripedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.InotifyProtos;
 import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto;
@@ -184,6 +185,7 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
@@ -428,6 +430,21 @@ public class PBHelper {
     return new Block(b.getBlockId(), b.getNumBytes(), b.getGenStamp());
   }
 
+  public static BlockInfoStriped convert(StripedBlockProto p) {
+    return new BlockInfoStriped(convert(p.getBlock()),
+        (short) p.getDataBlockNum(), (short) p.getParityBlockNum());
+  }
+
+  public static StripedBlockProto convert(BlockInfoStriped blk) {
+    BlockProto bp = BlockProto.newBuilder().setBlockId(blk.getBlockId())
+        .setGenStamp(blk.getGenerationStamp()).setNumBytes(blk.getNumBytes())
+        .build();
+    return StripedBlockProto.newBuilder()
+        .setDataBlockNum(blk.getDataBlockNum())
+        .setParityBlockNum(blk.getParityBlockNum())
+        .setBlock(bp).build();
+  }
+
   public static BlockWithLocationsProto convert(BlockWithLocations blk) {
     return BlockWithLocationsProto.newBuilder()
         .setBlock(convert(blk.getBlock()))
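Note: a hedged sketch of how the two new converters round-trip a striped block through its protobuf form; the block id, length, gen stamp, and the 6+3 data/parity split are illustrative values only, not something this patch fixes.

    // Illustrative only: round-trip a striped block through StripedBlockProto.
    BlockInfoStriped striped = new BlockInfoStriped(
        new Block(1001L, 0, 1L), (short) 6, (short) 3);   // blk, data, parity
    StripedBlockProto proto = PBHelper.convert(striped);  // in-memory -> proto
    BlockInfoStriped back = PBHelper.convert(proto);      // proto -> in-memory
    assert back.getDataBlockNum() == 6 && back.getParityBlockNum() == 3;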
BlockCollection.java

@@ -31,7 +31,7 @@ public interface BlockCollection {
   /**
    * Get the last block of the collection.
    */
-  public BlockInfoContiguous getLastBlock();
+  public BlockInfo getLastBlock();
 
   /**
    * Get content summary.
@@ -44,9 +44,9 @@ public interface BlockCollection {
   public int numBlocks();
 
   /**
-   * Get the blocks or block groups.
+   * Get the blocks (striped or contiguous).
    */
-  public BlockInfoContiguous[] getBlocks();
+  public BlockInfo[] getBlocks();
 
   /**
    * Get preferred block size for the collection
@@ -71,16 +71,15 @@ public interface BlockCollection {
   public String getName();
 
   /**
-   * Set the block/block-group at the given index.
+   * Set the block (contiguous or striped) at the given index.
    */
-  public void setBlock(int index, BlockInfoContiguous blk);
+  public void setBlock(int index, BlockInfo blk);
 
   /**
    * Convert the last block of the collection to an under-construction block
    * and set the locations.
    */
-  public BlockInfoContiguousUnderConstruction setLastBlock(
-      BlockInfoContiguous lastBlock,
+  public void convertLastBlockToUC(BlockInfo lastBlock,
       DatanodeStorageInfo[] targets) throws IOException;
 
   /**
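Note the shape change here: setLastBlock() used to return the new under-construction block, while convertLastBlockToUC() returns void, so callers now re-read the last block afterwards. A sketch, mirroring the BlockManager hunk further down:

    // New two-step pattern that replaces setLastBlock().
    bc.convertLastBlockToUC(bc.getLastBlock(), targets); // convert in place
    BlockInfo ucBlock = bc.getLastBlock();               // then fetch the UC block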
BlockInfo.java

@@ -21,6 +21,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.util.LightWeightGSet;
 
+import java.io.IOException;
 import java.util.LinkedList;
 
 /**
@@ -289,8 +290,9 @@ public abstract class BlockInfo extends Block
 
   /**
    * BlockInfo represents a block that is not being constructed.
-   * In order to start modifying the block, the BlockInfo should be converted
-   * to {@link BlockInfoContiguousUnderConstruction}.
+   * In order to start modifying the block, the BlockInfo should be converted to
+   * {@link BlockInfoContiguousUnderConstruction} or
+   * {@link BlockInfoStripedUnderConstruction}.
    * @return {@link HdfsServerConstants.BlockUCState#COMPLETE}
    */
   public HdfsServerConstants.BlockUCState getBlockUCState() {
@@ -340,4 +342,86 @@ public abstract class BlockInfo extends Block
       return new BlockInfoStriped((BlockInfoStriped) b);
     }
   }
+
+  static BlockInfo convertToCompleteBlock(BlockInfo blk) throws IOException {
+    if (blk instanceof BlockInfoContiguousUnderConstruction) {
+      return ((BlockInfoContiguousUnderConstruction) blk)
+          .convertToCompleteBlock();
+    } else if (blk instanceof BlockInfoStripedUnderConstruction) {
+      return ((BlockInfoStripedUnderConstruction) blk).convertToCompleteBlock();
+    } else {
+      return blk;
+    }
+  }
+
+  static void commitBlock(BlockInfo blockInfo, Block reported)
+      throws IOException {
+    if (blockInfo instanceof BlockInfoContiguousUnderConstruction) {
+      ((BlockInfoContiguousUnderConstruction) blockInfo).commitBlock(reported);
+    } else if (blockInfo instanceof BlockInfoStripedUnderConstruction) {
+      ((BlockInfoStripedUnderConstruction) blockInfo).commitBlock(reported);
+    }
+  }
+
+  static void addReplica(BlockInfo ucBlock, DatanodeStorageInfo storageInfo,
+      Block reportedBlock, HdfsServerConstants.ReplicaState reportedState) {
+    assert ucBlock instanceof BlockInfoContiguousUnderConstruction ||
+        ucBlock instanceof BlockInfoStripedUnderConstruction;
+    if (ucBlock instanceof BlockInfoContiguousUnderConstruction) {
+      ((BlockInfoContiguousUnderConstruction) ucBlock).addReplicaIfNotPresent(
+          storageInfo, reportedBlock, reportedState);
+    } else { // StripedUC
+      ((BlockInfoStripedUnderConstruction) ucBlock).addReplicaIfNotPresent(
+          storageInfo, reportedBlock, reportedState);
+    }
+  }
+
+  static int getNumExpectedLocations(BlockInfo ucBlock) {
+    assert ucBlock instanceof BlockInfoContiguousUnderConstruction ||
+        ucBlock instanceof BlockInfoStripedUnderConstruction;
+    if (ucBlock instanceof BlockInfoContiguousUnderConstruction) {
+      return ((BlockInfoContiguousUnderConstruction) ucBlock)
+          .getNumExpectedLocations();
+    } else { // StripedUC
+      return ((BlockInfoStripedUnderConstruction) ucBlock)
+          .getNumExpectedLocations();
+    }
+  }
+
+  public static DatanodeStorageInfo[] getExpectedStorageLocations(
+      BlockInfo ucBlock) {
+    assert ucBlock instanceof BlockInfoContiguousUnderConstruction ||
+        ucBlock instanceof BlockInfoStripedUnderConstruction;
+    if (ucBlock instanceof BlockInfoContiguousUnderConstruction) {
+      return ((BlockInfoContiguousUnderConstruction) ucBlock)
+          .getExpectedStorageLocations();
+    } else { // StripedUC
+      return ((BlockInfoStripedUnderConstruction) ucBlock)
+          .getExpectedStorageLocations();
+    }
+  }
+
+  public static void setExpectedLocations(BlockInfo ucBlock,
+      DatanodeStorageInfo[] targets) {
+    assert ucBlock instanceof BlockInfoContiguousUnderConstruction ||
+        ucBlock instanceof BlockInfoStripedUnderConstruction;
+    if (ucBlock instanceof BlockInfoContiguousUnderConstruction) {
+      ((BlockInfoContiguousUnderConstruction) ucBlock)
+          .setExpectedLocations(targets);
+    } else { // StripedUC
+      ((BlockInfoStripedUnderConstruction) ucBlock)
+          .setExpectedLocations(targets);
+    }
+  }
+
+  public static long getBlockRecoveryId(BlockInfo ucBlock) {
+    assert ucBlock instanceof BlockInfoContiguousUnderConstruction ||
+        ucBlock instanceof BlockInfoStripedUnderConstruction;
+    if (ucBlock instanceof BlockInfoContiguousUnderConstruction) {
+      return ((BlockInfoContiguousUnderConstruction) ucBlock)
+          .getBlockRecoveryId();
+    } else { // StripedUC
+      return ((BlockInfoStripedUnderConstruction) ucBlock).getBlockRecoveryId();
+    }
+  }
 }
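These static helpers centralize the instanceof dispatch so that callers no longer hard-cast to BlockInfoContiguousUnderConstruction. A caller-side sketch (assuming the block really is under construction, as the asserts require):

    // Sketch: subtype-agnostic access to an under-construction block.
    BlockInfo last = bc.getLastBlock();  // contiguous or striped UC block
    int expected = BlockInfo.getNumExpectedLocations(last);
    DatanodeStorageInfo[] locs = BlockInfo.getExpectedStorageLocations(last);
    long recoveryId = BlockInfo.getBlockRecoveryId(last);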
BlockInfoContiguousUnderConstruction.java

@@ -74,7 +74,7 @@ public class BlockInfoContiguousUnderConstruction extends BlockInfoContiguous {
       BlockUCState state, DatanodeStorageInfo[] targets) {
     super(blk, replication);
     assert getBlockUCState() != BlockUCState.COMPLETE :
-        "BlockInfoUnderConstruction cannot be in COMPLETE state";
+        "BlockInfoContiguousUnderConstruction cannot be in COMPLETE state";
     this.blockUCState = state;
     setExpectedLocations(targets);
   }
@@ -82,7 +82,7 @@ public class BlockInfoContiguousUnderConstruction extends BlockInfoContiguous {
   /**
    * Convert an under construction block to a complete block.
    *
-   * @return BlockInfo - a complete block.
+   * @return BlockInfoContiguous - a complete block.
    * @throws IOException if the state of the block
    * (the generation stamp and the length) has not been committed by
    * the client or it does not have at least a minimal number of replicas
@@ -197,7 +197,7 @@ public class BlockInfoContiguousUnderConstruction extends BlockInfoContiguous {
     blockRecoveryId = recoveryId;
     if (replicas.size() == 0) {
       NameNode.blockStateChangeLog.warn("BLOCK*"
-          + " BlockInfoUnderConstruction.initLeaseRecovery:"
+          + " BlockInfoContiguousUnderConstruction.initLeaseRecovery:"
          + " No blocks found, lease removed.");
     }
     boolean allLiveReplicasTriedAsPrimary = true;
BlockInfoStriped.java

@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 
 /**
  * Subclass of {@link BlockInfo}, presenting a block group in erasure coding.
@@ -59,6 +61,14 @@ public class BlockInfoStriped extends BlockInfo {
     return (short) (dataBlockNum + parityBlockNum);
   }
 
+  public short getDataBlockNum() {
+    return dataBlockNum;
+  }
+
+  public short getParityBlockNum() {
+    return parityBlockNum;
+  }
+
   private void initIndices() {
     for (int i = 0; i < indices.length; i++) {
       indices[i] = -1;
@@ -176,4 +186,25 @@ public class BlockInfoStriped extends BlockInfo {
     }
     return num;
   }
+
+  /**
+   * Convert a complete block to an under construction block.
+   * @return BlockInfoUnderConstruction - an under construction block.
+   */
+  public BlockInfoStripedUnderConstruction convertToBlockUnderConstruction(
+      BlockUCState s, DatanodeStorageInfo[] targets) {
+    final BlockInfoStripedUnderConstruction ucBlock;
+    if(isComplete()) {
+      ucBlock = new BlockInfoStripedUnderConstruction(this, getDataBlockNum(),
+          getParityBlockNum(), s, targets);
+      ucBlock.setBlockCollection(getBlockCollection());
+    } else {
+      // the block is already under construction
+      ucBlock = (BlockInfoStripedUnderConstruction) this;
+      ucBlock.setBlockUCState(s);
+      ucBlock.setExpectedLocations(targets);
+      ucBlock.setBlockCollection(getBlockCollection());
+    }
+    return ucBlock;
+  }
 }
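For orientation, a small sketch of the complete-to-UC conversion added above. Null targets are allowed and yield an empty expected-location list (see the new class below); the block id and the 6+3 numbers are illustrative only:

    BlockInfoStriped group = new BlockInfoStriped(
        new Block(2001L, 0, 1L), (short) 6, (short) 3);
    BlockInfoStripedUnderConstruction uc = group.convertToBlockUnderConstruction(
        HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, null);
    assert uc.getNumExpectedLocations() == 0;  // null targets -> no replicas yet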
BlockInfoStripedUnderConstruction.java (new file)

@@ -0,0 +1,240 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState.COMPLETE;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION;
+
+/**
+ * Represents a striped block that is currently being constructed.
+ * This is usually the last block of a file opened for write or append.
+ */
+public class BlockInfoStripedUnderConstruction extends BlockInfoStriped {
+  private BlockUCState blockUCState;
+
+  /**
+   * Block replicas as assigned when the block was allocated.
+   *
+   * TODO: we need to update this attribute, along with the return type of
+   * getExpectedStorageLocations and LocatedBlock. For striped blocks, clients
+   * need to understand the index of each striped block in the block group.
+   */
+  private List<ReplicaUnderConstruction> replicas;
+
+  /**
+   * The new generation stamp, which this block will have
+   * after the recovery succeeds. Also used as a recovery id to identify
+   * the right recovery if any of the abandoned recoveries re-appear.
+   */
+  private long blockRecoveryId = 0;
+
+  /**
+   * Constructor with null storage targets.
+   */
+  public BlockInfoStripedUnderConstruction(Block blk, short dataBlockNum,
+      short parityBlockNum) {
+    this(blk, dataBlockNum, parityBlockNum, UNDER_CONSTRUCTION, null);
+  }
+
+  /**
+   * Create a striped block that is currently being constructed.
+   */
+  public BlockInfoStripedUnderConstruction(Block blk, short dataBlockNum,
+      short parityBlockNum, BlockUCState state, DatanodeStorageInfo[] targets) {
+    super(blk, dataBlockNum, parityBlockNum);
+    assert getBlockUCState() != COMPLETE :
+        "BlockInfoStripedUnderConstruction cannot be in COMPLETE state";
+    this.blockUCState = state;
+    setExpectedLocations(targets);
+  }
+
+  /**
+   * Convert an under construction striped block to a complete striped block.
+   *
+   * @return BlockInfoStriped - a complete block.
+   * @throws IOException if the state of the block
+   * (the generation stamp and the length) has not been committed by
+   * the client or it does not have at least a minimal number of replicas
+   * reported from data-nodes.
+   */
+  BlockInfoStriped convertToCompleteBlock() throws IOException {
+    assert getBlockUCState() != COMPLETE :
+        "Trying to convert a COMPLETE block";
+    return new BlockInfoStriped(this);
+  }
+
+  /** Set expected locations */
+  public void setExpectedLocations(DatanodeStorageInfo[] targets) {
+    int numLocations = targets == null ? 0 : targets.length;
+    this.replicas = new ArrayList<>(numLocations);
+    for(int i = 0; i < numLocations; i++) {
+      replicas.add(new ReplicaUnderConstruction(this, targets[i],
+          ReplicaState.RBW));
+    }
+  }
+
+  /**
+   * Create array of expected replica locations
+   * (as has been assigned by chooseTargets()).
+   */
+  public DatanodeStorageInfo[] getExpectedStorageLocations() {
+    int numLocations = getNumExpectedLocations();
+    DatanodeStorageInfo[] storages = new DatanodeStorageInfo[numLocations];
+    for (int i = 0; i < numLocations; i++) {
+      storages[i] = replicas.get(i).getExpectedStorageLocation();
+    }
+    return storages;
+  }
+
+  /** Get the number of expected locations */
+  public int getNumExpectedLocations() {
+    return replicas == null ? 0 : replicas.size();
+  }
+
+  /**
+   * Return the state of the block under construction.
+   * @see BlockUCState
+   */
+  @Override // BlockInfo
+  public BlockUCState getBlockUCState() {
+    return blockUCState;
+  }
+
+  void setBlockUCState(BlockUCState s) {
+    blockUCState = s;
+  }
+
+  /** Get block recovery ID */
+  public long getBlockRecoveryId() {
+    return blockRecoveryId;
+  }
+
+  /**
+   * Process the recorded replicas. When about to commit or finish the
+   * pipeline recovery sort out bad replicas.
+   * @param genStamp The final generation stamp for the block.
+   */
+  public void setGenerationStampAndVerifyReplicas(long genStamp) {
+    // Set the generation stamp for the block.
+    setGenerationStamp(genStamp);
+    if (replicas == null)
+      return;
+
+    // Remove the replicas with wrong gen stamp.
+    // The replica list is unchanged.
+    for (ReplicaUnderConstruction r : replicas) {
+      if (genStamp != r.getGenerationStamp()) {
+        r.getExpectedStorageLocation().removeBlock(this);
+        NameNode.blockStateChangeLog.info("BLOCK* Removing stale replica "
+            + "from location: {}", r.getExpectedStorageLocation());
+      }
+    }
+  }
+
+  /**
+   * Commit block's length and generation stamp as reported by the client.
+   * Set block state to {@link BlockUCState#COMMITTED}.
+   * @param block - contains client reported block length and generation
+   */
+  void commitBlock(Block block) throws IOException {
+    if (getBlockId() != block.getBlockId()) {
+      throw new IOException("Trying to commit inconsistent block: id = "
+          + block.getBlockId() + ", expected id = " + getBlockId());
+    }
+    blockUCState = BlockUCState.COMMITTED;
+    this.set(getBlockId(), block.getNumBytes(), block.getGenerationStamp());
+    // Sort out invalid replicas.
+    setGenerationStampAndVerifyReplicas(block.getGenerationStamp());
+  }
+
+  /**
+   * Initialize lease recovery for this striped block.
+   */
+  public void initializeBlockRecovery(long recoveryId) {
+    setBlockUCState(BlockUCState.UNDER_RECOVERY);
+    blockRecoveryId = recoveryId;
+    if (replicas == null || replicas.size() == 0) {
+      NameNode.blockStateChangeLog.warn("BLOCK*" +
+          " BlockInfoUnderConstruction.initLeaseRecovery:" +
+          " No blocks found, lease removed.");
+    }
+    // TODO we need to implement different recovery logic here
+  }
+
+  void addReplicaIfNotPresent(DatanodeStorageInfo storage, Block block,
+      ReplicaState rState) {
+    Iterator<ReplicaUnderConstruction> it = replicas.iterator();
+    while (it.hasNext()) {
+      ReplicaUnderConstruction r = it.next();
+      DatanodeStorageInfo expectedLocation = r.getExpectedStorageLocation();
+      if (expectedLocation == storage) {
+        // Record the gen stamp from the report
+        r.setGenerationStamp(block.getGenerationStamp());
+        return;
+      } else if (expectedLocation != null &&
+          expectedLocation.getDatanodeDescriptor() ==
+              storage.getDatanodeDescriptor()) {
+        // The Datanode reported that the block is on a different storage
+        // than the one chosen by BlockPlacementPolicy. This can occur as
+        // we allow Datanodes to choose the target storage. Update our
+        // state by removing the stale entry and adding a new one.
+        it.remove();
+        break;
+      }
+    }
+    replicas.add(new ReplicaUnderConstruction(block, storage, rState));
+  }
+
+  @Override
+  public String toString() {
+    final StringBuilder b = new StringBuilder(100);
+    appendStringTo(b);
+    return b.toString();
+  }
+
+  @Override
+  public void appendStringTo(StringBuilder sb) {
+    super.appendStringTo(sb);
+    appendUCParts(sb);
+  }
+
+  private void appendUCParts(StringBuilder sb) {
+    sb.append("{UCState=").append(blockUCState).append(", replicas=[");
+    if (replicas != null) {
+      Iterator<ReplicaUnderConstruction> iter = replicas.iterator();
+      if (iter.hasNext()) {
+        iter.next().appendStringTo(sb);
+        while (iter.hasNext()) {
+          sb.append(", ");
+          iter.next().appendStringTo(sb);
+        }
+      }
+    }
+    sb.append("]}");
+  }
+}
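To illustrate the commit path in the new class (commitBlock is package-private and is normally reached through the static BlockInfo.commitBlock dispatcher added above; the ids, length, and gen stamps here are made up):

    BlockInfoStripedUnderConstruction uc = new BlockInfoStripedUnderConstruction(
        new Block(3001L, 0, 1L), (short) 6, (short) 3);
    // Client reports the final length and generation stamp for the block group.
    BlockInfo.commitBlock(uc, new Block(3001L, 131072, 2L));
    assert uc.getBlockUCState() == BlockUCState.COMMITTED;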
BlockManager.java

@@ -543,8 +543,8 @@ public class BlockManager {
     int usableReplicas = numReplicas.liveReplicas() +
         numReplicas.decommissionedAndDecommissioning();
 
-    if (block instanceof BlockInfoContiguous) {
-      BlockCollection bc = ((BlockInfoContiguous) block).getBlockCollection();
+    if (block instanceof BlockInfo) {
+      BlockCollection bc = ((BlockInfo) block).getBlockCollection();
       String fileName = (bc == null) ? "[orphaned]" : bc.getName();
       out.print(fileName + ": ");
     }
@@ -598,15 +598,14 @@ public class BlockManager {
    * @throws IOException if the block does not have at least a minimal number
    * of replicas reported from data-nodes.
    */
-  private static boolean commitBlock(
-      final BlockInfoContiguousUnderConstruction block,
+  private static boolean commitBlock(final BlockInfo block,
       final Block commitBlock) throws IOException {
     if (block.getBlockUCState() == BlockUCState.COMMITTED)
       return false;
     assert block.getNumBytes() <= commitBlock.getNumBytes() :
       "commitBlock length is less than the stored one "
           + commitBlock.getNumBytes() + " vs. " + block.getNumBytes();
-    block.commitBlock(commitBlock);
+    BlockInfo.commitBlock(block, commitBlock);
     return true;
   }
 
@@ -624,16 +623,16 @@ public class BlockManager {
       Block commitBlock) throws IOException {
     if(commitBlock == null)
       return false; // not committing, this is a block allocation retry
-    BlockInfoContiguous lastBlock = bc.getLastBlock();
+    BlockInfo lastBlock = bc.getLastBlock();
     if(lastBlock == null)
       return false; // no blocks in file yet
     if(lastBlock.isComplete())
       return false; // already completed (e.g. by syncBlock)
 
-    final boolean b = commitBlock(
-        (BlockInfoContiguousUnderConstruction)lastBlock, commitBlock);
-    if(countNodes(lastBlock).liveReplicas() >= minReplication)
-      completeBlock(bc, bc.numBlocks()-1, false);
+    final boolean b = commitBlock(lastBlock, commitBlock);
+    if (countNodes(lastBlock).liveReplicas() >= minReplication) {
+      completeBlock(bc, bc.numBlocks() - 1, false);
+    }
     return b;
   }
 
@@ -646,22 +645,25 @@ public class BlockManager {
    */
   private BlockInfo completeBlock(final BlockCollection bc,
       final int blkIndex, boolean force) throws IOException {
-    if(blkIndex < 0)
+    if (blkIndex < 0) {
       return null;
-    BlockInfoContiguous curBlock = bc.getBlocks()[blkIndex];
-    if (curBlock.isComplete())
+    }
+    BlockInfo curBlock = bc.getBlocks()[blkIndex];
+    if (curBlock.isComplete()) {
       return curBlock;
-    // TODO: support BlockInfoStripedUC
-    BlockInfoContiguousUnderConstruction ucBlock =
-        (BlockInfoContiguousUnderConstruction)curBlock;
-    int numNodes = ucBlock.numNodes();
-    if (!force && numNodes < minReplication)
+    }
+
+    int numNodes = curBlock.numNodes();
+    if (!force && numNodes < minReplication) {
       throw new IOException("Cannot complete block: " +
           "block does not satisfy minimal replication requirement.");
-    if(!force && ucBlock.getBlockUCState() != BlockUCState.COMMITTED)
+    }
+    if (!force && curBlock.getBlockUCState() != BlockUCState.COMMITTED) {
       throw new IOException(
           "Cannot complete block: block has not been COMMITTED by the client");
-    BlockInfoContiguous completeBlock = ucBlock.convertToCompleteBlock();
+    }
+
+    final BlockInfo completeBlock = BlockInfo.convertToCompleteBlock(curBlock);
     // replace penultimate block in file
     bc.setBlock(blkIndex, completeBlock);
 
@@ -679,10 +681,9 @@ public class BlockManager {
     return blocksMap.replaceBlock(completeBlock);
   }
 
-  // TODO: support BlockInfoStrippedUC
   private BlockInfo completeBlock(final BlockCollection bc,
       final BlockInfo block, boolean force) throws IOException {
-    BlockInfoContiguous[] fileBlocks = bc.getBlocks();
+    BlockInfo[] fileBlocks = bc.getBlocks();
     for (int idx = 0; idx < fileBlocks.length; idx++) {
       if (fileBlocks[idx] == block) {
         return completeBlock(bc, idx, force);
@@ -698,6 +699,7 @@ public class BlockManager {
    */
   public BlockInfo forceCompleteBlock(final BlockCollection bc,
       final BlockInfoContiguousUnderConstruction block) throws IOException {
+    // TODO: support BlockInfoStripedUC for editlog
     block.commitBlock(block);
     return completeBlock(bc, block, true);
   }
@@ -719,7 +721,7 @@ public class BlockManager {
    */
   public LocatedBlock convertLastBlockToUnderConstruction(
       BlockCollection bc, long bytesToRemove) throws IOException {
-    BlockInfoContiguous oldBlock = bc.getLastBlock();
+    BlockInfo oldBlock = bc.getLastBlock();
     if(oldBlock == null ||
         bc.getPreferredBlockSize() == oldBlock.getNumBytes() - bytesToRemove)
       return null;
@@ -728,8 +730,10 @@ public class BlockManager {
 
     DatanodeStorageInfo[] targets = getStorages(oldBlock);
 
-    BlockInfoContiguousUnderConstruction ucBlock = bc.setLastBlock(oldBlock,
-        targets);
+    // convert the last block to UC
+    bc.convertLastBlockToUC(oldBlock, targets);
+    // get the new created uc block
+    BlockInfo ucBlock = bc.getLastBlock();
     blocksMap.replaceBlock(ucBlock);
 
     // Remove block from replication queue.
@@ -771,11 +775,10 @@ public class BlockManager {
     return locations;
   }
 
-  private List<LocatedBlock> createLocatedBlockList(
-      final BlockInfoContiguous[] blocks,
+  private List<LocatedBlock> createLocatedBlockList(final BlockInfo[] blocks,
       final long offset, final long length, final int nrBlocksToReturn,
       final AccessMode mode) throws IOException {
-    int curBlk = 0;
+    int curBlk;
     long curPos = 0, blkSize = 0;
     int nrBlocks = (blocks[0].getNumBytes() == 0) ? 0 : blocks.length;
     for (curBlk = 0; curBlk < nrBlocks; curBlk++) {
@@ -788,10 +791,10 @@ public class BlockManager {
     }
 
     if (nrBlocks > 0 && curBlk == nrBlocks) // offset >= end of file
-      return Collections.<LocatedBlock>emptyList();
+      return Collections.emptyList();
 
     long endOff = offset + length;
-    List<LocatedBlock> results = new ArrayList<LocatedBlock>(blocks.length);
+    List<LocatedBlock> results = new ArrayList<>(blocks.length);
     do {
       results.add(createLocatedBlock(blocks[curBlk], curPos, mode));
       curPos += blocks[curBlk].getNumBytes();
@@ -802,9 +805,9 @@ public class BlockManager {
     return results;
   }
 
-  private LocatedBlock createLocatedBlock(final BlockInfoContiguous[] blocks,
+  private LocatedBlock createLocatedBlock(final BlockInfo[] blocks,
       final long endPos, final AccessMode mode) throws IOException {
-    int curBlk = 0;
+    int curBlk;
     long curPos = 0;
     int nrBlocks = (blocks[0].getNumBytes() == 0) ? 0 : blocks.length;
     for (curBlk = 0; curBlk < nrBlocks; curBlk++) {
@@ -817,8 +820,8 @@ public class BlockManager {
 
     return createLocatedBlock(blocks[curBlk], curPos, mode);
   }
 
-  private LocatedBlock createLocatedBlock(final BlockInfoContiguous blk, final long pos,
+  private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos,
       final AccessMode mode) throws IOException {
     final LocatedBlock lb = createLocatedBlock(blk, pos);
     if (mode != null) {
@@ -828,8 +831,8 @@ public class BlockManager {
   }
 
   /** @return a LocatedBlock for the given block */
-  private LocatedBlock createLocatedBlock(final BlockInfoContiguous blk, final long pos
-      ) throws IOException {
+  private LocatedBlock createLocatedBlock(final BlockInfo blk,
      final long pos) throws IOException {
     if (blk instanceof BlockInfoContiguousUnderConstruction) {
       if (blk.isComplete()) {
         throw new IOException(
@@ -842,6 +845,7 @@ public class BlockManager {
       final ExtendedBlock eb = new ExtendedBlock(namesystem.getBlockPoolId(), blk);
       return newLocatedBlock(eb, storages, pos, false);
     }
+    // TODO support BlockInfoStripedUC
 
     // get block locations
     final int numCorruptNodes = countNodes(blk).corruptReplicas();
@@ -877,7 +881,7 @@ public class BlockManager {
   }
 
   /** Create a LocatedBlocks. */
-  public LocatedBlocks createLocatedBlocks(final BlockInfoContiguous[] blocks,
+  public LocatedBlocks createLocatedBlocks(final BlockInfo[] blocks,
       final long fileSizeExcludeBlocksUnderConstruction,
       final boolean isFileUnderConstruction, final long offset,
       final long length, final boolean needBlockToken,
@@ -900,7 +904,7 @@ public class BlockManager {
       final LocatedBlock lastlb;
       final boolean isComplete;
       if (!inSnapshot) {
-        final BlockInfoContiguous last = blocks[blocks.length - 1];
+        final BlockInfo last = blocks[blocks.length - 1];
         final long lastPos = last.isComplete()?
             fileSizeExcludeBlocksUnderConstruction - last.getNumBytes()
             : fileSizeExcludeBlocksUnderConstruction;
@@ -1724,12 +1728,15 @@ public class BlockManager {
    * reported by the datanode in the block report.
    */
   static class StatefulBlockInfo {
-    final BlockInfoContiguousUnderConstruction storedBlock;
+    final BlockInfo storedBlock; // should be UC block
     final Block reportedBlock;
     final ReplicaState reportedState;
 
-    StatefulBlockInfo(BlockInfoContiguousUnderConstruction storedBlock,
+    StatefulBlockInfo(BlockInfo storedBlock,
        Block reportedBlock, ReplicaState reportedState) {
+      Preconditions.checkArgument(
+          storedBlock instanceof BlockInfoContiguousUnderConstruction ||
+          storedBlock instanceof BlockInfoStripedUnderConstruction);
      this.storedBlock = storedBlock;
      this.reportedBlock = reportedBlock;
      this.reportedState = reportedState;
@@ -2133,15 +2140,12 @@ public class BlockManager {
 
       // If block is under construction, add this replica to its list
       if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) {
-        ((BlockInfoContiguousUnderConstruction) storedBlock)
-            .addReplicaIfNotPresent(storageInfo, iblk, reportedState);
+        BlockInfo.addReplica(storedBlock, storageInfo, iblk, reportedState);
         // OpenFileBlocks only inside snapshots also will be added to safemode
         // threshold. So we need to update such blocks to safemode
         // refer HDFS-5283
-        BlockInfoContiguousUnderConstruction blockUC =
-            (BlockInfoContiguousUnderConstruction) storedBlock;
-        if (namesystem.isInSnapshot(blockUC)) {
-          int numOfReplicas = blockUC.getNumExpectedLocations();
+        if (namesystem.isInSnapshot(storedBlock.getBlockCollection())) {
+          int numOfReplicas = BlockInfo.getNumExpectedLocations(storedBlock);
           namesystem.incrementSafeBlockCount(numOfReplicas);
         }
         //and fall through to next clause
@@ -2164,7 +2168,7 @@ public class BlockManager {
     // place a delimiter in the list which separates blocks
     // that have been reported from those that have not
     Block delimiterBlock = new Block();
-    BlockInfoContiguous delimiter = new BlockInfoContiguous(delimiterBlock,
+    BlockInfo delimiter = new BlockInfoContiguous(delimiterBlock,
        (short) 1);
     AddBlockResult result = storageInfo.addBlock(delimiter, delimiterBlock);
     assert result == AddBlockResult.ADDED
@@ -2294,9 +2298,8 @@ public class BlockManager {
     }
 
     if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) {
-      toUC.add(new StatefulBlockInfo(
-          (BlockInfoContiguousUnderConstruction) storedBlock,
-          new Block(block), reportedState));
+      toUC.add(new StatefulBlockInfo(storedBlock, new Block(block),
+          reportedState));
       return storedBlock;
     }
 
@@ -2487,9 +2490,8 @@ public class BlockManager {
 
   void addStoredBlockUnderConstruction(StatefulBlockInfo ucBlock,
       DatanodeStorageInfo storageInfo) throws IOException {
-    BlockInfoContiguousUnderConstruction block = ucBlock.storedBlock;
-    block.addReplicaIfNotPresent(
-        storageInfo, ucBlock.reportedBlock, ucBlock.reportedState);
+    BlockInfo block = ucBlock.storedBlock;
+    BlockInfo.addReplica(block, storageInfo, ucBlock.reportedBlock, ucBlock.reportedState);
 
     if (ucBlock.reportedState == ReplicaState.FINALIZED &&
         (block.findStorageInfo(storageInfo) < 0)) {
@@ -2549,7 +2551,8 @@ public class BlockManager {
     assert block != null && namesystem.hasWriteLock();
     BlockInfo storedBlock;
     DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
-    if (block instanceof BlockInfoContiguousUnderConstruction) {
+    if (block instanceof BlockInfoContiguousUnderConstruction ||
+        block instanceof BlockInfoStripedUnderConstruction) {
       //refresh our copy in case the block got completed in another thread
       storedBlock = getStoredBlock(block);
     } else {
@@ -2565,7 +2568,6 @@ public class BlockManager {
       return block;
     }
     BlockCollection bc = storedBlock.getBlockCollection();
-    assert bc != null : "Block must belong to a file";
 
     // add block to the datanode
     AddBlockResult result = storageInfo.addBlock(storedBlock, reportedBlock);
@@ -3493,8 +3495,8 @@ public class BlockManager {
    * replicated.
    */
   public boolean checkBlocksProperlyReplicated(
-      String src, BlockInfoContiguous[] blocks) {
-    for (BlockInfoContiguous b: blocks) {
+      String src, BlockInfo[] blocks) {
+    for (BlockInfo b: blocks) {
       if (!b.isComplete()) {
         final BlockInfoContiguousUnderConstruction uc =
             (BlockInfoContiguousUnderConstruction)b;
@@ -3563,7 +3565,7 @@ public class BlockManager {
     if (!this.shouldCheckForEnoughRacks) {
       return true;
     }
-    boolean enoughRacks = false;;
+    boolean enoughRacks = false;
     Collection<DatanodeDescriptor> corruptNodes =
         corruptReplicas.getNodes(b);
     int numExpectedReplicas = getReplication(b);
@@ -3609,21 +3611,15 @@ public class BlockManager {
     return this.neededReplications.getCorruptReplOneBlockSize();
   }
 
-  public BlockInfoContiguous addBlockCollection(BlockInfoContiguous block,
+  public BlockInfo addBlockCollection(BlockInfo block,
       BlockCollection bc) {
-    // TODO
-    return (BlockInfoContiguous) blocksMap.addBlockCollection(block, bc);
+    return blocksMap.addBlockCollection(block, bc);
   }
 
   public BlockCollection getBlockCollection(Block b) {
     return blocksMap.getBlockCollection(b);
   }
 
-  /** @return an iterator of the datanodes. */
-  public Iterable<DatanodeStorageInfo> getStorages(final Block block) {
-    return blocksMap.getStorages(block);
-  }
-
   public int numCorruptReplicas(Block block) {
     return corruptReplicas.numCorruptReplicas(block);
   }
@@ -3656,26 +3652,6 @@ public class BlockManager {
   public int getCapacity() {
     return blocksMap.getCapacity();
   }
 
-  /**
-   * Return a range of corrupt replica block ids. Up to numExpectedBlocks
-   * blocks starting at the next block after startingBlockId are returned
-   * (fewer if numExpectedBlocks blocks are unavailable). If startingBlockId
-   * is null, up to numExpectedBlocks blocks are returned from the beginning.
-   * If startingBlockId cannot be found, null is returned.
-   *
-   * @param numExpectedBlocks Number of block ids to return.
-   *  0 <= numExpectedBlocks <= 100
-   * @param startingBlockId Block id from which to start. If null, start at
-   *  beginning.
-   * @return Up to numExpectedBlocks blocks from startingBlockId if it exists
-   *
-   */
-  public long[] getCorruptReplicaBlockIds(int numExpectedBlocks,
-      Long startingBlockId) {
-    return corruptReplicas.getCorruptReplicaBlockIds(numExpectedBlocks,
-        startingBlockId);
-  }
-
   /**
    * Return an iterator over the set of blocks for which there are no replicas.
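The net effect in BlockManager is that the commit/complete path is now type-agnostic. A condensed sketch of the flow for the last block of a file, mirroring the commitOrCompleteLastBlock hunk above (variable names abbreviated):

    // Sketch: same flow for contiguous and striped last blocks.
    BlockInfo lastBlock = bc.getLastBlock();          // either block flavor
    BlockInfo.commitBlock(lastBlock, reported);       // dispatches on subtype
    if (countNodes(lastBlock).liveReplicas() >= minReplication) {
      completeBlock(bc, bc.numBlocks() - 1, false);   // swaps in a complete block
    }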
CacheReplicationMonitor.java

@@ -369,7 +369,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
    * @param file The file.
    */
   private void rescanFile(CacheDirective directive, INodeFile file) {
-    BlockInfoContiguous[] blockInfos = file.getBlocks();
+    BlockInfo[] blockInfos = file.getBlocks();
 
     // Increment the "needed" statistics
     directive.addFilesNeeded(1);
@@ -394,7 +394,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
     }
 
     long cachedTotal = 0;
-    for (BlockInfoContiguous blockInfo : blockInfos) {
+    for (BlockInfo blockInfo : blockInfos) {
       if (!blockInfo.getBlockUCState().equals(BlockUCState.COMPLETE)) {
         // We don't try to cache blocks that are under construction.
         LOG.trace("Directive {}: can't cache block {} because it is in state "
@@ -452,8 +452,8 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
           file.getFullPathName(), cachedTotal, neededTotal);
     }
 
-  private String findReasonForNotCaching(CachedBlock cblock,
-      BlockInfoContiguous blockInfo) {
+  private String findReasonForNotCaching(CachedBlock cblock,
+      BlockInfo blockInfo) {
     if (blockInfo == null) {
       // Somehow, a cache report with the block arrived, but the block
       // reports from the DataNode haven't (yet?) described such a block.
@@ -513,7 +513,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
         iter.remove();
       }
     }
-    BlockInfoContiguous blockInfo = namesystem.getStoredBlock(new Block(cblock.getBlockId()));
+    BlockInfo blockInfo = namesystem.getStoredBlock(new Block(cblock.getBlockId()));
     String reason = findReasonForNotCaching(cblock, blockInfo);
     int neededCached = 0;
     if (reason != null) {
@@ -627,7 +627,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
       List<DatanodeDescriptor> pendingCached) {
     // To figure out which replicas can be cached, we consult the
     // blocksMap. We don't want to try to cache a corrupt replica, though.
-    BlockInfoContiguous blockInfo = namesystem.getStoredBlock(new Block(cachedBlock.getBlockId()));
+    BlockInfo blockInfo = namesystem.getStoredBlock(new Block(cachedBlock.getBlockId()));
     if (blockInfo == null) {
       LOG.debug("Block {}: can't add new cached replicas," +
           " because there is no record of this block " +
@@ -665,7 +665,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
     Iterator<CachedBlock> it = datanode.getPendingCached().iterator();
     while (it.hasNext()) {
       CachedBlock cBlock = it.next();
-      BlockInfoContiguous info =
+      BlockInfo info =
           namesystem.getStoredBlock(new Block(cBlock.getBlockId()));
       if (info != null) {
         pendingBytes -= info.getNumBytes();
@@ -675,7 +675,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
     // Add pending uncached blocks from effective capacity
     while (it.hasNext()) {
       CachedBlock cBlock = it.next();
-      BlockInfoContiguous info =
+      BlockInfo info =
          namesystem.getStoredBlock(new Block(cBlock.getBlockId()));
       if (info != null) {
         pendingBytes += info.getNumBytes();
FSDirConcatOp.java

@@ -143,6 +143,7 @@ class FSDirConcatOp {
       throw new HadoopIllegalArgumentException("concat: source file " + src
           + " is invalid or empty or underConstruction");
     }
+
     // source file's preferred block size cannot be greater than the target
     // file
     if (srcINodeFile.getPreferredBlockSize() >
@@ -152,6 +153,11 @@ class FSDirConcatOp {
           + " which is greater than the target file's preferred block size "
          + targetINode.getPreferredBlockSize());
     }
+    // TODO currently we do not support concatenating EC files
+    if (srcINodeFile.isStriped()) {
+      throw new HadoopIllegalArgumentException("concat: the src file " + src
+          + " is with striped blocks");
+    }
     si.add(srcINodeFile);
   }
 
@@ -228,7 +234,7 @@ class FSDirConcatOp {
     int count = 0;
     for (INodeFile nodeToRemove : srcList) {
       if(nodeToRemove != null) {
-        nodeToRemove.setBlocks(null);
+        nodeToRemove.setContiguousBlocks(null);
         nodeToRemove.getParent().removeChild(nodeToRemove);
         fsd.getINodeMap().remove(nodeToRemove);
         count++;
FSDirectory.java

@@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
@@ -919,7 +920,7 @@ public class FSDirectory implements Closeable {
       unprotectedTruncate(iip, newLength, collectedBlocks, mtime, null);
 
     if(! onBlockBoundary) {
-      BlockInfoContiguous oldBlock = file.getLastBlock();
+      BlockInfo oldBlock = file.getLastBlock();
       Block tBlk =
       getFSNamesystem().prepareFileForTruncate(iip,
           clientName, clientMachine, file.computeFileSize() - newLength,
@@ -928,7 +929,7 @@ public class FSDirectory implements Closeable {
           tBlk.getNumBytes() == truncateBlock.getNumBytes() :
           "Should be the same block.";
       if(oldBlock.getBlockId() != tBlk.getBlockId() &&
-          !file.isBlockInLatestSnapshot(oldBlock)) {
+          !file.isBlockInLatestSnapshot((BlockInfoContiguous) oldBlock)) {
         getBlockManager().removeBlockFromMap(oldBlock);
       }
     }
FSEditLog.java

@@ -44,8 +44,8 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@@ -773,10 +773,10 @@ public class FSEditLog implements LogsPurgeable {
 
   public void logAddBlock(String path, INodeFile file) {
     Preconditions.checkArgument(file.isUnderConstruction());
-    BlockInfoContiguous[] blocks = file.getBlocks();
+    BlockInfo[] blocks = file.getBlocks();
     Preconditions.checkState(blocks != null && blocks.length > 0);
-    BlockInfoContiguous pBlock = blocks.length > 1 ? blocks[blocks.length - 2] : null;
-    BlockInfoContiguous lastBlock = blocks[blocks.length - 1];
+    BlockInfo pBlock = blocks.length > 1 ? blocks[blocks.length - 2] : null;
+    BlockInfo lastBlock = blocks[blocks.length - 1];
     AddBlockOp op = AddBlockOp.getInstance(cache.get()).setPath(path)
         .setPenultimateBlock(pBlock).setLastBlock(lastBlock);
     logEdit(op);
FSEditLogLoader.java

@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -506,7 +507,7 @@ public class FSEditLogLoader {
       }
       INodeFile oldFile = INodeFile.valueOf(fsDir.getINode(path), path);
       // add the new block to the INodeFile
-      addNewBlock(fsDir, addBlockOp, oldFile);
+      addNewBlock(addBlockOp, oldFile);
       break;
     }
     case OP_SET_REPLICATION: {
@@ -939,15 +940,15 @@ public class FSEditLogLoader {
 
   /**
    * Add a new block into the given INodeFile
+   * TODO support adding striped block
    */
-  private void addNewBlock(FSDirectory fsDir, AddBlockOp op, INodeFile file)
-      throws IOException {
-    BlockInfoContiguous[] oldBlocks = file.getBlocks();
+  private void addNewBlock(AddBlockOp op, INodeFile file) throws IOException {
+    BlockInfo[] oldBlocks = file.getBlocks();
     Block pBlock = op.getPenultimateBlock();
     Block newBlock= op.getLastBlock();
 
     if (pBlock != null) { // the penultimate block is not null
-      Preconditions.checkState(oldBlocks != null && oldBlocks.length > 0);
+      assert oldBlocks != null && oldBlocks.length > 0;
       // compare pBlock with the last block of oldBlocks
       Block oldLastBlock = oldBlocks[oldBlocks.length - 1];
       if (oldLastBlock.getBlockId() != pBlock.getBlockId()
@@ -977,12 +978,13 @@ public class FSEditLogLoader {
 
   /**
    * Update in-memory data structures with new block information.
+   * TODO support adding striped block
    * @throws IOException
    */
   private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op,
       INodesInPath iip, INodeFile file) throws IOException {
     // Update its block list
-    BlockInfoContiguous[] oldBlocks = file.getBlocks();
+    BlockInfo[] oldBlocks = file.getBlocks();
     Block[] newBlocks = op.getBlocks();
     String path = op.getPath();
 
@@ -991,7 +993,7 @@ public class FSEditLogLoader {
 
     // First, update blocks in common
     for (int i = 0; i < oldBlocks.length && i < newBlocks.length; i++) {
-      BlockInfoContiguous oldBlock = oldBlocks[i];
+      BlockInfo oldBlock = oldBlocks[i];
       Block newBlock = newBlocks[i];
 
       boolean isLastBlock = i == newBlocks.length - 1;
FSImageFormat.java

@@ -52,6 +52,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutFlags;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -686,7 +687,7 @@ public class FSImageFormat {
 
     public void updateBlocksMap(INodeFile file) {
       // Add file->block mapping
-      final BlockInfoContiguous[] blocks = file.getBlocks();
+      final BlockInfo[] blocks = file.getBlocks();
       if (blocks != null) {
         final BlockManager bm = namesystem.getBlockManager();
         for (int i = 0; i < blocks.length; i++) {
@@ -958,9 +959,9 @@ public class FSImageFormat {
       FileUnderConstructionFeature uc = cons.getFileUnderConstructionFeature();
       oldnode.toUnderConstruction(uc.getClientName(), uc.getClientMachine());
       if (oldnode.numBlocks() > 0) {
-        BlockInfoContiguous ucBlock = cons.getLastBlock();
+        BlockInfo ucBlock = cons.getLastBlock();
         // we do not replace the inode, just replace the last block of oldnode
-        BlockInfoContiguous info = namesystem.getBlockManager().addBlockCollection(
+        BlockInfo info = namesystem.getBlockManager().addBlockCollection(
             ucBlock, oldnode);
         oldnode.setBlock(oldnode.numBlocks() - 1, info);
       }
FSImageFormatPBINode.java

@@ -42,9 +42,13 @@ import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StripedBlockProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStripedUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.LoaderContext;
 import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext;
@@ -53,6 +57,7 @@ import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructio
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.StripedBlocksFeature;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto;
@@ -211,7 +216,7 @@ public final class FSImageFormatPBINode {
 
   public static void updateBlocksMap(INodeFile file, BlockManager bm) {
     // Add file->block mapping
-    final BlockInfoContiguous[] blocks = file.getBlocks();
+    final BlockInfo[] blocks = file.getBlocks();
     if (blocks != null) {
       for (int i = 0; i < blocks.length; i++) {
         file.setBlock(i, bm.addBlockCollection(blocks[i], file));
@@ -347,16 +352,30 @@ public final class FSImageFormatPBINode {
             loadXAttrs(f.getXAttrs(), state.getStringTable())));
       }
 
+      FileWithStripedBlocksFeature stripeFeature = null;
+      if (f.hasStripedBlocks()) {
+        StripedBlocksFeature sb = f.getStripedBlocks();
+        stripeFeature = file.addStripedBlocksFeature();
+        for (StripedBlockProto sp : sb.getBlocksList()) {
+          stripeFeature.addBlock(PBHelper.convert(sp));
+        }
+      }
+
       // under-construction information
       if (f.hasFileUC()) {
         INodeSection.FileUnderConstructionFeature uc = f.getFileUC();
         file.toUnderConstruction(uc.getClientName(), uc.getClientMachine());
         if (blocks.length > 0) {
-          BlockInfoContiguous lastBlk = file.getLastBlock();
-          // replace the last block of file
-          file.setBlock(file.numBlocks() - 1, new BlockInfoContiguousUnderConstruction(
-              lastBlk, replication));
+          BlockInfo lastBlk = file.getLastBlock();
+          // replace the last block of file
+          final BlockInfo ucBlk;
+          if (stripeFeature != null) {
+            BlockInfoStriped striped = (BlockInfoStriped) lastBlk;
+            ucBlk = new BlockInfoStripedUnderConstruction(striped,
+                striped.getDataBlockNum(), striped.getParityBlockNum());
+          } else {
+            ucBlk = new BlockInfoContiguousUnderConstruction(lastBlk, replication);
+          }
+          file.setBlock(file.numBlocks() - 1, ucBlk);
        }
      }
      return file;
    }
@@ -630,6 +649,19 @@ public final class FSImageFormatPBINode {
       }
     }
 
+      FileWithStripedBlocksFeature sb = n.getStripedBlocksFeature();
+      if (sb != null) {
+        StripedBlocksFeature.Builder builder =
+            StripedBlocksFeature.newBuilder();
+        BlockInfoStriped[] sblocks = sb.getBlocks();
+        if (sblocks != null) {
+          for (BlockInfoStriped sblk : sblocks) {
+            builder.addBlocks(PBHelper.convert(sblk));
+          }
+        }
+        b.setStripedBlocks(builder.build());
+      }
+
     FileUnderConstructionFeature uc = n.getFileUnderConstructionFeature();
     if (uc != null) {
       INodeSection.FileUnderConstructionFeature f =
@@ -658,7 +690,7 @@ public final class FSImageFormatPBINode {
       r.writeDelimitedTo(out);
     }
 
-    private final INodeSection.INode.Builder buildINodeCommon(INode n) {
+    private INodeSection.INode.Builder buildINodeCommon(INode n) {
       return INodeSection.INode.newBuilder()
           .setId(n.getId())
          .setName(ByteString.copyFrom(n.getLocalNameBytes()));
@ -203,8 +203,10 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretMan
|
|||
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager.SecretManagerState;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStripedUnderConstruction;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
|
||||
|
@@ -2009,6 +2011,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    final BlockStoragePolicy lpPolicy =
        blockManager.getStoragePolicy("LAZY_PERSIST");

    // truncating a file with striped blocks is not yet supported
    if (file.isStriped()) {
      throw new UnsupportedOperationException(
          "Cannot truncate file with striped block " + src);
    }
    if (lpPolicy != null &&
        lpPolicy.getId() == file.getStoragePolicyID()) {
      throw new UnsupportedOperationException(

@@ -2090,8 +2097,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    leaseManager.addLease(
        file.getFileUnderConstructionFeature().getClientName(), file.getId());
    boolean shouldRecoverNow = (newBlock == null);
    BlockInfoContiguous oldBlock = file.getLastBlock();
    boolean shouldCopyOnTruncate = shouldCopyOnTruncate(file, oldBlock);

    BlockInfo oldBlock = file.getLastBlock();
    assert oldBlock instanceof BlockInfoContiguous;

    boolean shouldCopyOnTruncate = shouldCopyOnTruncate(file,
        (BlockInfoContiguous) oldBlock);
    if (newBlock == null) {
      newBlock = (shouldCopyOnTruncate) ? createNewBlock(file.isStriped()) :
          new Block(oldBlock.getBlockId(), oldBlock.getNumBytes(),

@@ -2106,7 +2117,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
        file.getPreferredBlockReplication());
    truncatedBlockUC.setNumBytes(oldBlock.getNumBytes() - lastBlockDelta);
    truncatedBlockUC.setTruncateBlock(oldBlock);
    file.setLastBlock(truncatedBlockUC, blockManager.getStorages(oldBlock));
    file.convertLastBlockToUC(truncatedBlockUC,
        blockManager.getStorages(oldBlock));
    getBlockManager().addBlockCollection(truncatedBlockUC, file);

    NameNode.stateChangeLog.info("BLOCK* prepareFileForTruncate: "
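The truncate path above relies on a guard-then-downcast idiom: once the striped case has been rejected, the generic BlockInfo returned by getLastBlock() can be narrowed safely. A minimal sketch of the idiom; the handleContiguous helper is illustrative only:

    BlockInfo last = file.getLastBlock();
    if (file.isStriped()) {
      // striped files take a different code path (or are rejected outright)
      throw new UnsupportedOperationException("striped blocks not supported here");
    }
    // safe: the striped case was excluded above
    BlockInfoContiguous contiguous = (BlockInfoContiguous) last;
    handleContiguous(contiguous);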
@@ -2494,6 +2506,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
          + src + " for client " + clientMachine);
    }
    INodeFile myFile = INodeFile.valueOf(inode, src, true);

    // appending to a file with striped blocks is not yet supported
    if (myFile.isStriped()) {
      throw new UnsupportedOperationException(
          "Cannot append to file with striped block " + src);
    }

    final BlockStoragePolicy lpPolicy =
        blockManager.getStoragePolicy("LAZY_PERSIST");
    if (lpPolicy != null &&

@@ -2505,7 +2524,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    recoverLeaseInternal(RecoverLeaseOp.APPEND_FILE, iip, src, holder,
        clientMachine, false);

    final BlockInfoContiguous lastBlock = myFile.getLastBlock();
    final BlockInfoContiguous lastBlock =
        (BlockInfoContiguous) myFile.getLastBlock();
    // Check that the block has at least minimum replication.
    if (lastBlock != null && lastBlock.isComplete() &&
        !getBlockManager().isSufficientlyReplicated(lastBlock)) {
@@ -2561,7 +2581,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
        }
      }
    } else {
      BlockInfoContiguous lastBlock = file.getLastBlock();
      BlockInfo lastBlock = file.getLastBlock();
      if (lastBlock != null) {
        ExtendedBlock blk = new ExtendedBlock(this.getBlockPoolId(), lastBlock);
        ret = new LocatedBlock(blk, new DatanodeInfo[0]);

@@ -2740,7 +2760,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
          op.getExceptionMessage(src, holder, clientMachine,
              "lease recovery is in progress. Try again later."));
    } else {
      final BlockInfoContiguous lastBlock = file.getLastBlock();
      final BlockInfo lastBlock = file.getLastBlock();
      if (lastBlock != null
          && lastBlock.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
        throw new RecoveryInProgressException(
@@ -3066,13 +3086,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
          .getBlocks());
    } else {
      // check the penultimate block of this file
      BlockInfoContiguous b = v.getPenultimateBlock();
      BlockInfo b = v.getPenultimateBlock();
      return b == null ||
          blockManager.checkBlocksProperlyReplicated(
              src, new BlockInfoContiguous[] { b });
              src, new BlockInfo[] { b });
    }
  }

  /**
   * Change the indicated filename.
   * @deprecated Use {@link #renameTo(String, String, boolean,

@@ -3243,7 +3263,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,

    for (Block b : blocks.getToDeleteList()) {
      if (trackBlockCounts) {
        BlockInfoContiguous bi = getStoredBlock(b);
        BlockInfo bi = getStoredBlock(b);
        if (bi.isComplete()) {
          numRemovedComplete++;
          if (bi.numNodes() >= blockManager.minReplication) {
@@ -3467,10 +3487,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,

    final INodeFile pendingFile = iip.getLastINode().asFile();
    int nrBlocks = pendingFile.numBlocks();
    BlockInfoContiguous[] blocks = pendingFile.getBlocks();
    BlockInfo[] blocks = pendingFile.getBlocks();

    int nrCompleteBlocks;
    BlockInfoContiguous curBlock = null;
    BlockInfo curBlock = null;
    for (nrCompleteBlocks = 0; nrCompleteBlocks < nrBlocks; nrCompleteBlocks++) {
      curBlock = blocks[nrCompleteBlocks];
      if (!curBlock.isComplete())

@@ -3505,12 +3525,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,

    // The last block is not COMPLETE, and
    // the penultimate block, if it exists, is either COMPLETE or COMMITTED
    final BlockInfoContiguous lastBlock = pendingFile.getLastBlock();
    final BlockInfo lastBlock = pendingFile.getLastBlock();
    BlockUCState lastBlockState = lastBlock.getBlockUCState();
    BlockInfoContiguous penultimateBlock = pendingFile.getPenultimateBlock();
    BlockInfo penultimateBlock = pendingFile.getPenultimateBlock();

    // If the penultimate block doesn't exist, then its minReplication is met
    boolean penultimateBlockMinReplication = penultimateBlock == null ? true :
    boolean penultimateBlockMinReplication = penultimateBlock == null ||
        blockManager.checkMinReplication(penultimateBlock);

    switch (lastBlockState) {
@@ -3540,6 +3560,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
      throw new AlreadyBeingCreatedException(message);
    case UNDER_CONSTRUCTION:
    case UNDER_RECOVERY:
      // TODO: support recovery of striped blocks
      final BlockInfoContiguousUnderConstruction uc =
          (BlockInfoContiguousUnderConstruction) lastBlock;
      // determine if last block was intended to be truncated

@@ -3651,14 +3672,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    blockManager.checkReplication(pendingFile);
  }

  public BlockInfoContiguous getStoredBlock(Block block) {
    return (BlockInfoContiguous) blockManager.getStoredBlock(block);
  public BlockInfo getStoredBlock(Block block) {
    return blockManager.getStoredBlock(block);
  }

  @Override
  public boolean isInSnapshot(BlockInfoContiguousUnderConstruction blockUC) {
  public boolean isInSnapshot(BlockCollection bc) {
    assert hasReadLock();
    final BlockCollection bc = blockUC.getBlockCollection();
    if (bc == null || !(bc instanceof INodeFile)
        || !bc.isUnderConstruction()) {
      return false;
@@ -3703,7 +3723,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    waitForLoadingFSImage();
    writeLock();
    boolean copyTruncate = false;
    BlockInfoContiguousUnderConstruction truncatedBlock = null;
    BlockInfo truncatedBlock = null;
    try {
      checkOperation(OperationCategory.WRITE);
      // If a DN tries to commit to the standby, the recovery will

@@ -3711,7 +3731,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,

      checkNameNodeSafeMode(
          "Cannot commitBlockSynchronization while in safe mode");
      final BlockInfoContiguous storedBlock = getStoredBlock(
      final BlockInfo storedBlock = getStoredBlock(
          ExtendedBlock.getLocalBlock(oldBlock));
      if (storedBlock == null) {
        if (deleteblock) {

@@ -3760,9 +3780,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
        return;
      }

      truncatedBlock = (BlockInfoContiguousUnderConstruction) iFile
          .getLastBlock();
      long recoveryId = truncatedBlock.getBlockRecoveryId();
      truncatedBlock = iFile.getLastBlock();
      long recoveryId = BlockInfo.getBlockRecoveryId(truncatedBlock);
      copyTruncate = truncatedBlock.getBlockId() != storedBlock.getBlockId();
      if (recoveryId != newgenerationstamp) {
        throw new IOException("The recovery id " + newgenerationstamp
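commitBlockSynchronization now keeps the last block as a plain BlockInfo and reads the recovery id through a static helper instead of downcasting at the call site. One plausible shape for such a helper, assuming it dispatches on the two under-construction subclasses; the body below is an illustrative reconstruction, not the committed code:

    static long getBlockRecoveryId(BlockInfo block) {
      // dispatch on whichever under-construction flavor the block is
      if (block instanceof BlockInfoContiguousUnderConstruction) {
        return ((BlockInfoContiguousUnderConstruction) block).getBlockRecoveryId();
      } else if (block instanceof BlockInfoStripedUnderConstruction) {
        return ((BlockInfoStripedUnderConstruction) block).getBlockRecoveryId();
      }
      // complete blocks have no recovery in flight
      return 0;
    }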
@@ -3776,8 +3795,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
        if (remove) {
          blockManager.removeBlock(storedBlock);
        }
      }
      else {
      } else {
        // update last block
        if (!copyTruncate) {
          storedBlock.setGenerationStamp(newgenerationstamp);

@@ -3825,9 +3843,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
            trimmedTargets.toArray(new DatanodeID[trimmedTargets.size()]),
            trimmedStorages.toArray(new String[trimmedStorages.size()]));
        if (copyTruncate) {
          iFile.setLastBlock(truncatedBlock, trimmedStorageInfos);
          iFile.convertLastBlockToUC(truncatedBlock, trimmedStorageInfos);
        } else {
          iFile.setLastBlock(storedBlock, trimmedStorageInfos);
          iFile.convertLastBlockToUC(storedBlock, trimmedStorageInfos);
          if (closeFile) {
            blockManager.markBlockReplicasAsCorrupt(storedBlock,
                oldGenerationStamp, oldNumBytes, trimmedStorageInfos);

@@ -3838,7 +3856,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
      if (closeFile) {
        if (copyTruncate) {
          src = closeFileCommitBlocks(iFile, truncatedBlock);
          if (!iFile.isBlockInLatestSnapshot(storedBlock)) {
          if (!iFile.isBlockInLatestSnapshot((BlockInfoContiguous) storedBlock)) {
            blockManager.removeBlock(storedBlock);
          }
        } else {

@@ -3872,7 +3890,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   * @throws IOException on error
   */
  @VisibleForTesting
  String closeFileCommitBlocks(INodeFile pendingFile, BlockInfoContiguous storedBlock)
  String closeFileCommitBlocks(INodeFile pendingFile, BlockInfo storedBlock)
      throws IOException {
    final INodesInPath iip = INodesInPath.fromINode(pendingFile);
    final String src = iip.getPath();
@@ -4163,7 +4181,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,

    while (it.hasNext()) {
      Block b = it.next();
      BlockInfoContiguous blockInfo = getStoredBlock(b);
      BlockInfo blockInfo = getStoredBlock(b);
      if (blockInfo.getBlockCollection().getStoragePolicyID() == lpPolicy.getId()) {
        filesToDelete.add(blockInfo.getBlockCollection());
      }

@@ -5105,7 +5123,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    SafeModeInfo safeMode = this.safeMode;
    if (safeMode == null) // mostly true
      return;
    BlockInfoContiguous storedBlock = getStoredBlock(b);
    BlockInfo storedBlock = getStoredBlock(b);
    if (storedBlock.isComplete()) {
      safeMode.decrementSafeBlockCount((short) blockManager.countNodes(b).liveReplicas());
    }

@@ -5667,7 +5685,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
        + "access token for block " + block);

    // check stored block state
    BlockInfoContiguous storedBlock = getStoredBlock(ExtendedBlock.getLocalBlock(block));
    BlockInfo storedBlock = getStoredBlock(ExtendedBlock.getLocalBlock(block));
    if (storedBlock == null ||
        storedBlock.getBlockUCState() != BlockUCState.UNDER_CONSTRUCTION) {
      throw new IOException(block +
@@ -5796,8 +5814,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    assert hasWriteLock();
    // check the validity of the block and lease holder name
    final INodeFile pendingFile = checkUCBlock(oldBlock, clientName);
    final BlockInfoContiguousUnderConstruction blockinfo
        = (BlockInfoContiguousUnderConstruction) pendingFile.getLastBlock();
    final BlockInfo lastBlock = pendingFile.getLastBlock();
    // when updating the pipeline, the last block must be a contiguous block
    assert lastBlock instanceof BlockInfoContiguousUnderConstruction;
    BlockInfoContiguousUnderConstruction blockinfo =
        (BlockInfoContiguousUnderConstruction) lastBlock;

    // check new GS & length: this is not expected
    if (newBlock.getGenerationStamp() <= blockinfo.getGenerationStamp() ||
@@ -20,8 +20,10 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStripedUnderConstruction;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;

/**

@@ -58,12 +60,12 @@ public class FileUnderConstructionFeature implements INode.Feature {
   */
  void updateLengthOfLastBlock(INodeFile f, long lastBlockLength)
      throws IOException {
    BlockInfoContiguous lastBlock = f.getLastBlock();
    BlockInfo lastBlock = f.getLastBlock();
    assert (lastBlock != null) : "The last block for path "
        + f.getFullPathName() + " is null when updating its length";
    assert (lastBlock instanceof BlockInfoContiguousUnderConstruction)
    assert !lastBlock.isComplete()
        : "The last block for path " + f.getFullPathName()
        + " is not a BlockInfoUnderConstruction when updating its length";
    lastBlock.setNumBytes(lastBlockLength);
  }

@@ -74,11 +76,10 @@ public class FileUnderConstructionFeature implements INode.Feature {
   */
  void cleanZeroSizeBlock(final INodeFile f,
      final BlocksMapUpdateInfo collectedBlocks) {
    final BlockInfoContiguous[] blocks = f.getBlocks();
    final BlockInfo[] blocks = f.getBlocks();
    if (blocks != null && blocks.length > 0
        && blocks[blocks.length - 1] instanceof BlockInfoContiguousUnderConstruction) {
      BlockInfoContiguousUnderConstruction lastUC =
          (BlockInfoContiguousUnderConstruction) blocks[blocks.length - 1];
        && !blocks[blocks.length - 1].isComplete()) {
      BlockInfo lastUC = blocks[blocks.length - 1];
      if (lastUC.getNumBytes() == 0) {
        // this is a 0-sized block; no need to check its UC state here
        collectedBlocks.addDeleteBlock(lastUC);
@@ -0,0 +1,112 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode;

import com.google.common.base.Preconditions;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;

/**
 * Feature for a file with striped blocks.
 */
class FileWithStripedBlocksFeature implements INode.Feature {
  private BlockInfoStriped[] blocks;

  FileWithStripedBlocksFeature() {
    blocks = new BlockInfoStriped[0];
  }

  FileWithStripedBlocksFeature(BlockInfoStriped[] blocks) {
    Preconditions.checkArgument(blocks != null);
    this.blocks = blocks;
  }

  BlockInfoStriped[] getBlocks() {
    return this.blocks;
  }

  void setBlock(int index, BlockInfoStriped blk) {
    blocks[index] = blk;
  }

  BlockInfoStriped getLastBlock() {
    return blocks == null || blocks.length == 0 ?
        null : blocks[blocks.length - 1];
  }

  int numBlocks() {
    return blocks == null ? 0 : blocks.length;
  }

  void updateBlockCollection(INodeFile file) {
    if (blocks != null) {
      for (BlockInfoStriped blk : blocks) {
        blk.setBlockCollection(file);
      }
    }
  }

  private void setBlocks(BlockInfoStriped[] blocks) {
    this.blocks = blocks;
  }

  void addBlock(BlockInfoStriped newBlock) {
    if (this.blocks == null) {
      this.setBlocks(new BlockInfoStriped[]{newBlock});
    } else {
      int size = this.blocks.length;
      BlockInfoStriped[] newlist = new BlockInfoStriped[size + 1];
      System.arraycopy(this.blocks, 0, newlist, 0, size);
      newlist[size] = newBlock;
      this.setBlocks(newlist);
    }
  }

  boolean removeLastBlock(Block oldblock) {
    if (blocks == null || blocks.length == 0) {
      return false;
    }
    int newSize = blocks.length - 1;
    if (!blocks[newSize].equals(oldblock)) {
      return false;
    }

    // copy to a new list
    BlockInfoStriped[] newlist = new BlockInfoStriped[newSize];
    System.arraycopy(blocks, 0, newlist, 0, newSize);
    setBlocks(newlist);
    return true;
  }

  void truncateStripedBlocks(int n) {
    final BlockInfoStriped[] newBlocks;
    if (n == 0) {
      newBlocks = new BlockInfoStriped[0];
    } else {
      newBlocks = new BlockInfoStriped[n];
      System.arraycopy(getBlocks(), 0, newBlocks, 0, n);
    }
    // set new blocks
    setBlocks(newBlocks);
  }

  void clear() {
    this.blocks = null;
  }
}
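A short sketch of how this feature behaves once attached to an INodeFile: block groups are appended as the writer allocates them, and an aborted allocation is rolled back with removeLastBlock. The 6+3 schema and block fields below are illustrative only:

    BlockInfoStriped blk1 =
        new BlockInfoStriped(new Block(1L, 0L, 1001L), (short) 6, (short) 3);
    BlockInfoStriped blk2 =
        new BlockInfoStriped(new Block(2L, 0L, 1002L), (short) 6, (short) 3);

    FileWithStripedBlocksFeature feature = new FileWithStripedBlocksFeature();
    feature.addBlock(blk1);                  // first block group of the file
    feature.addBlock(blk2);                  // second block group
    assert feature.numBlocks() == 2;
    assert feature.getLastBlock() == blk2;

    // roll back the last allocation, e.g. when a client abandons the block
    boolean removed = feature.removeLastBlock(blk2);
    assert removed && feature.numBlocks() == 1;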
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.server.namenode;

import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION;
import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID;
import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.NO_SNAPSHOT_ID;

@@ -37,12 +38,12 @@ import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;

@@ -174,6 +175,31 @@ public class INodeFile extends INodeWithAdditionalFields
        && getXAttrFeature() == other.getXAttrFeature();
  }

  /* Start of StripedBlock Feature */

  public final FileWithStripedBlocksFeature getStripedBlocksFeature() {
    return getFeature(FileWithStripedBlocksFeature.class);
  }

  public FileWithStripedBlocksFeature addStripedBlocksFeature() {
    assert blocks == null || blocks.length == 0 :
        "The file contains contiguous blocks";
    assert !isWithStripedBlocks();
    this.setFileReplication((short) 0);
    FileWithStripedBlocksFeature sb = new FileWithStripedBlocksFeature();
    addFeature(sb);
    return sb;
  }

  public boolean isWithStripedBlocks() {
    return getStripedBlocksFeature() != null;
  }

  /** Used to make sure there is no contiguous block related info */
  private boolean hasNoContiguousBlock() {
    return (blocks == null || blocks.length == 0) && getFileReplication() == 0;
  }

  /* Start of Under-Construction Feature */

  /**
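addStripedBlocksFeature deliberately zeroes the replication field so that hasNoContiguousBlock() can serve as the invariant check for striped files. A minimal sketch of turning a fresh inode into a striped one under that invariant; file and stripedBlockGroup are placeholders for a newly created, empty INodeFile and an allocated block group:

    // must be called before any contiguous block is added
    FileWithStripedBlocksFeature sb = file.addStripedBlocksFeature();
    assert file.isWithStripedBlocks();
    // from here on, block updates go through the feature, not the blocks array
    sb.addBlock(stripedBlockGroup);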
@@ -208,7 +234,7 @@ public class INodeFile extends INodeWithAdditionalFields
        "file is no longer under construction");
    FileUnderConstructionFeature uc = getFileUnderConstructionFeature();
    if (uc != null) {
      assertAllBlocksComplete();
      assertAllBlocksComplete(getBlocks());
      removeFeature(uc);
      this.setModificationTime(mtime);
    }

@@ -216,37 +242,56 @@ public class INodeFile extends INodeWithAdditionalFields
  }

  /** Assert all blocks are complete. */
  private void assertAllBlocksComplete() {
    if (blocks == null) {
  private void assertAllBlocksComplete(BlockInfo[] blks) {
    if (blks == null) {
      return;
    }
    for (int i = 0; i < blocks.length; i++) {
      Preconditions.checkState(blocks[i].isComplete(), "Failed to finalize"
    for (int i = 0; i < blks.length; i++) {
      Preconditions.checkState(blks[i].isComplete(), "Failed to finalize"
          + " %s %s since blocks[%s] is non-complete, where blocks=%s.",
          getClass().getSimpleName(), this, i, Arrays.asList(blocks));
          getClass().getSimpleName(), this, i, Arrays.asList(blks));
    }
  }

  /**
   * Instead of adding a new block, this function is usually used while loading
   * fsimage or converting the last block to UC/Complete.
   */
  @Override // BlockCollection
  public void setBlock(int index, BlockInfoContiguous blk) {
    this.blocks[index] = blk;
  public void setBlock(int index, BlockInfo blk) {
    FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
    if (sb == null) {
      assert blk instanceof BlockInfoContiguous;
      this.blocks[index] = (BlockInfoContiguous) blk;
    } else {
      assert blk instanceof BlockInfoStriped;
      assert hasNoContiguousBlock();
      sb.setBlock(index, (BlockInfoStriped) blk);
    }
  }

  @Override // BlockCollection, the file should be under construction
  public BlockInfoContiguousUnderConstruction setLastBlock(
      BlockInfoContiguous lastBlock, DatanodeStorageInfo[] locations)
      throws IOException {
  public void convertLastBlockToUC(BlockInfo lastBlock,
      DatanodeStorageInfo[] locations) throws IOException {
    Preconditions.checkState(isUnderConstruction(),
        "file is no longer under construction");
    if (numBlocks() == 0) {
      throw new IOException("Failed to set last block: File is empty.");
    }
    BlockInfoContiguousUnderConstruction ucBlock =
        lastBlock.convertToBlockUnderConstruction(
            BlockUCState.UNDER_CONSTRUCTION, locations);

    final BlockInfo ucBlock;
    FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
    if (sb == null) {
      assert lastBlock instanceof BlockInfoContiguous;
      ucBlock = ((BlockInfoContiguous) lastBlock)
          .convertToBlockUnderConstruction(UNDER_CONSTRUCTION, locations);
    } else {
      assert hasNoContiguousBlock();
      assert lastBlock instanceof BlockInfoStriped;
      ucBlock = ((BlockInfoStriped) lastBlock)
          .convertToBlockUnderConstruction(UNDER_CONSTRUCTION, locations);
    }
    setBlock(numBlocks() - 1, ucBlock);
    return ucBlock;
  }

  /**
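convertLastBlockToUC hides the striped/contiguous split from callers such as truncate and commitBlockSynchronization: they pass whatever getLastBlock() returned and the inode picks the right under-construction conversion. A hedged caller-side sketch; expectedStorages is a placeholder DatanodeStorageInfo array:

    BlockInfo last = file.getLastBlock();
    // works for both striped and contiguous files; the dispatch is internal
    file.convertLastBlockToUC(last, expectedStorages);
    assert !file.getLastBlock().isComplete();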
@@ -256,21 +301,27 @@ public class INodeFile extends INodeWithAdditionalFields
  BlockInfoContiguousUnderConstruction removeLastBlock(Block oldblock) {
    Preconditions.checkState(isUnderConstruction(),
        "file is no longer under construction");
    if (blocks == null || blocks.length == 0) {
      return null;
    }
    int size_1 = blocks.length - 1;
    if (!blocks[size_1].equals(oldblock)) {
      return null;
    }
    FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
    if (sb == null) {
      if (blocks == null || blocks.length == 0) {
        return null;
      }
      int size_1 = blocks.length - 1;
      if (!blocks[size_1].equals(oldblock)) {
        return null;
      }

    BlockInfoContiguousUnderConstruction uc =
        (BlockInfoContiguousUnderConstruction) blocks[size_1];
    // copy to a new list
    BlockInfoContiguous[] newlist = new BlockInfoContiguous[size_1];
    System.arraycopy(blocks, 0, newlist, 0, size_1);
    setBlocks(newlist);
    return uc;
      BlockInfoContiguousUnderConstruction uc =
          (BlockInfoContiguousUnderConstruction) blocks[size_1];
      // copy to a new list
      BlockInfoContiguous[] newlist = new BlockInfoContiguous[size_1];
      System.arraycopy(blocks, 0, newlist, 0, size_1);
      setContiguousBlocks(newlist);
      return uc;
    } else {
      assert hasNoContiguousBlock();
      return null;
    }
  }

  /* End of Under-Construction Feature */
@@ -371,13 +422,15 @@ public class INodeFile extends INodeWithAdditionalFields
  }

  /** Set the replication factor of this file. */
  public final void setFileReplication(short replication) {
  private void setFileReplication(short replication) {
    header = HeaderFormat.REPLICATION.BITS.combine(replication, header);
  }

  /** Set the replication factor of this file. */
  public final INodeFile setFileReplication(short replication,
      int latestSnapshotId) throws QuotaExceededException {
    Preconditions.checkState(!isWithStripedBlocks(),
        "Cannot set replication to a file with striped blocks");
    recordModification(latestSnapshotId);
    setFileReplication(replication);
    return this;
@@ -415,37 +468,57 @@ public class INodeFile extends INodeWithAdditionalFields
    setStoragePolicyID(storagePolicyId);
  }

  @Override
  @Override // INodeFileAttributes
  public long getHeaderLong() {
    return header;
  }

  /** @return the blocks of the file. */
  @Override
  public BlockInfoContiguous[] getBlocks() {
  @Override // BlockCollection
  public BlockInfo[] getBlocks() {
    FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
    if (sb != null) {
      assert hasNoContiguousBlock();
      return sb.getBlocks();
    } else {
      return this.blocks;
    }
  }

  /** Used by snapshot diff */
  public BlockInfoContiguous[] getContiguousBlocks() {
    return this.blocks;
  }
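getBlocks() is the single dispatch point most callers in this patch now rely on: the striped feature, when present, owns the block array, and the contiguous array must stay empty. A small sketch of what callers can and cannot assume; file is a placeholder INodeFile:

    BlockInfo[] blks = file.getBlocks();  // striped or contiguous, uniformly typed
    for (BlockInfo b : blks) {
      // safe for both kinds: state checks live on the common base class
      if (!b.isComplete()) {
        // handle the under-construction tail block
      }
    }
    // only snapshot code should reach for the raw contiguous array:
    BlockInfoContiguous[] raw = file.getContiguousBlocks();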
  /** @return blocks of the file corresponding to the snapshot. */
  public BlockInfoContiguous[] getBlocks(int snapshot) {
    if(snapshot == CURRENT_STATE_ID || getDiffs() == null)
  public BlockInfo[] getBlocks(int snapshot) {
    if (snapshot == CURRENT_STATE_ID || getDiffs() == null) {
      return getBlocks();
    }
    // find blocks stored in snapshot diffs (for truncate)
    FileDiff diff = getDiffs().getDiffById(snapshot);
    BlockInfoContiguous[] snapshotBlocks =
        diff == null ? getBlocks() : diff.getBlocks();
    if(snapshotBlocks != null)
    // note that currently FileDiff can only store contiguous blocks
    BlockInfo[] snapshotBlocks = diff == null ? getBlocks() : diff.getBlocks();
    if (snapshotBlocks != null) {
      return snapshotBlocks;
    }
    // Blocks are not in the current snapshot
    // Find next snapshot with blocks present or return current file blocks
    snapshotBlocks = getDiffs().findLaterSnapshotBlocks(snapshot);
    return (snapshotBlocks == null) ? getBlocks() : snapshotBlocks;
  }

  void updateBlockCollection() {
    if (blocks != null) {
  /** Used during concat to update the BlockCollection for each block */
  private void updateBlockCollection() {
    if (blocks != null && blocks.length > 0) {
      for (BlockInfoContiguous b : blocks) {
        b.setBlockCollection(this);
      }
    } else {
      FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
      if (sb != null) {
        sb.updateBlockCollection(this);
      }
    }
  }
@@ -468,27 +541,27 @@ public class INodeFile extends INodeWithAdditionalFields
      size += in.blocks.length;
    }

    setBlocks(newlist);
    setContiguousBlocks(newlist);
    updateBlockCollection();
  }

  /**
   * add a block to the block list
   * add a contiguous block to the block list
   */
  void addBlock(BlockInfoContiguous newblock) {
    if (this.blocks == null) {
      this.setBlocks(new BlockInfoContiguous[]{newblock});
      this.setContiguousBlocks(new BlockInfoContiguous[]{newblock});
    } else {
      int size = this.blocks.length;
      BlockInfoContiguous[] newlist = new BlockInfoContiguous[size + 1];
      System.arraycopy(this.blocks, 0, newlist, 0, size);
      newlist[size] = newblock;
      this.setBlocks(newlist);
      this.setContiguousBlocks(newlist);
    }
  }

  /** Set the blocks. */
  public void setBlocks(BlockInfoContiguous[] blocks) {
  public void setContiguousBlocks(BlockInfoContiguous[] blocks) {
    this.blocks = blocks;
  }
@@ -539,13 +612,19 @@ public class INodeFile extends INodeWithAdditionalFields
  }

  public void clearFile(ReclaimContext reclaimContext) {
    if (blocks != null && reclaimContext.collectedBlocks != null) {
      for (BlockInfoContiguous blk : blocks) {
    BlockInfo[] blks = getBlocks();
    if (blks != null && reclaimContext.collectedBlocks != null) {
      for (BlockInfo blk : blks) {
        reclaimContext.collectedBlocks.addDeleteBlock(blk);
        blk.setBlockCollection(null);
      }
    }
    setBlocks(null);
    setContiguousBlocks(null);

    FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
    if (sb != null) {
      sb.clear();
    }
    if (getAclFeature() != null) {
      AclStorage.removeAclFeature(getAclFeature());
    }
@@ -712,13 +791,27 @@ public class INodeFile extends INodeWithAdditionalFields
   */
  public final QuotaCounts storagespaceConsumed(BlockStoragePolicy bsp) {
    QuotaCounts counts = new QuotaCounts.Builder().build();
    final Iterable<BlockInfoContiguous> blocks;
    if (isStriped()) {
      return storagespaceConsumedWithStriped(bsp);
    } else {
      return storagespaceConsumedWithReplication(bsp);
    }
  }

  public final QuotaCounts storagespaceConsumedWithStriped(
      BlockStoragePolicy bsp) {
    return null;
  }
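storagespaceConsumedWithStriped is left as a stub in this patch (it returns null). For orientation only, a hedged sketch of what a striped computation might eventually look like, charging each block group its data plus parity cells rather than a replication multiple; everything beyond BlockInfoStriped and QuotaCounts here is an assumption, not the committed code:

    // illustrative only: not the committed implementation
    QuotaCounts counts = new QuotaCounts.Builder().build();
    for (BlockInfoStriped blk : getStripedBlocksFeature().getBlocks()) {
      short total = (short) (blk.getDataBlockNum() + blk.getParityBlockNum());
      // a striped block stores numBytes of data plus parity overhead,
      // roughly numBytes * total / dataBlockNum when the group is full
      long spaceConsumed = blk.getNumBytes() * total / blk.getDataBlockNum();
      counts.addStorageSpace(spaceConsumed);
    }
    return counts;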
  public final QuotaCounts storagespaceConsumedWithReplication(
      BlockStoragePolicy bsp) {
    QuotaCounts counts = new QuotaCounts.Builder().build();
    final Iterable<BlockInfo> blocks;
    FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
    if (sf == null) {
      blocks = Arrays.asList(getBlocks());
    } else {
      // Collect all distinct blocks
      Set<BlockInfoContiguous> allBlocks = new HashSet<>(Arrays.asList(getBlocks()));
      Set<BlockInfo> allBlocks = new HashSet<>(Arrays.asList(getBlocks()));
      List<FileDiff> diffs = sf.getDiffs().asList();
      for (FileDiff diff : diffs) {
        BlockInfoContiguous[] diffBlocks = diff.getBlocks();

@@ -730,7 +823,7 @@ public class INodeFile extends INodeWithAdditionalFields
    }

    final short replication = getPreferredBlockReplication();
    for (BlockInfoContiguous b : blocks) {
    for (BlockInfo b : blocks) {
      long blockSize = b.isComplete() ? b.getNumBytes() :
          getPreferredBlockSize();
      counts.addStorageSpace(blockSize * replication);
@@ -746,24 +839,44 @@ public class INodeFile extends INodeWithAdditionalFields
    return counts;
  }

  public final short getReplication(int lastSnapshotId) {
    if (lastSnapshotId != CURRENT_STATE_ID) {
      return getFileReplication(lastSnapshotId);
    } else {
      return getBlockReplication();
    }
  }

  /**
   * Return the penultimate allocated block for this file.
   */
  BlockInfoContiguous getPenultimateBlock() {
    if (blocks == null || blocks.length <= 1) {
      return null;
    }
    return blocks[blocks.length - 2];
  BlockInfo getPenultimateBlock() {
    BlockInfo[] blks = getBlocks();
    return (blks == null || blks.length <= 1) ?
        null : blks[blks.length - 2];
  }

  @Override
  public BlockInfoContiguous getLastBlock() {
    return blocks == null || blocks.length == 0? null: blocks[blocks.length-1];
  public BlockInfo getLastBlock() {
    FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
    if (sb == null) {
      return blocks == null || blocks.length == 0 ?
          null : blocks[blocks.length - 1];
    } else {
      assert hasNoContiguousBlock();
      return sb.getLastBlock();
    }
  }

  @Override
  public int numBlocks() {
    return blocks == null ? 0 : blocks.length;
    FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
    if (sb == null) {
      return blocks == null ? 0 : blocks.length;
    } else {
      assert hasNoContiguousBlock();
      return sb.numBlocks();
    }
  }

  @VisibleForTesting
@@ -775,6 +888,7 @@ public class INodeFile extends INodeWithAdditionalFields
    // only compare the first block
    out.print(", blocks=");
    out.print(blocks == null || blocks.length == 0 ? null : blocks[0]);
    // TODO print striped blocks
    out.println();
  }
@@ -784,9 +898,10 @@ public class INodeFile extends INodeWithAdditionalFields
   */
  public long collectBlocksBeyondMax(final long max,
      final BlocksMapUpdateInfo collectedBlocks) {
    final BlockInfoContiguous[] oldBlocks = getBlocks();
    if (oldBlocks == null)
    final BlockInfo[] oldBlocks = getBlocks();
    if (oldBlocks == null) {
      return 0;
    }
    // find the minimum n such that the size of the first n blocks > max
    int n = 0;
    long size = 0;
@@ -865,21 +980,36 @@ public class INodeFile extends INodeWithAdditionalFields
  }

  void truncateBlocksTo(int n) {
    FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
    if (sb == null) {
      truncateContiguousBlocks(n);
    } else {
      sb.truncateStripedBlocks(n);
    }
  }

  private void truncateContiguousBlocks(int n) {
    final BlockInfoContiguous[] newBlocks;
    if (n == 0) {
      newBlocks = BlockInfoContiguous.EMPTY_ARRAY;
    } else {
      newBlocks = new BlockInfoContiguous[n];
      System.arraycopy(getBlocks(), 0, newBlocks, 0, n);
      System.arraycopy(blocks, 0, newBlocks, 0, n);
    }
    // set new blocks
    setBlocks(newBlocks);
    setContiguousBlocks(newBlocks);
  }

  /**
   * This function is only called when block list is stored in snapshot
   * diffs. Note that this can only happen when truncation happens with
   * snapshots. Since we do not support truncation with striped blocks,
   * we only need to handle contiguous blocks here.
   */
  public void collectBlocksBeyondSnapshot(BlockInfoContiguous[] snapshotBlocks,
      BlocksMapUpdateInfo collectedBlocks) {
    BlockInfoContiguous[] oldBlocks = getBlocks();
    if(snapshotBlocks == null || oldBlocks == null)
    BlockInfoContiguous[] oldBlocks = this.blocks;
    if (snapshotBlocks == null || oldBlocks == null)
      return;
    // Skip blocks in common between the file and the snapshot
    int n = 0;
@@ -887,7 +1017,7 @@ public class INodeFile extends INodeWithAdditionalFields
        oldBlocks[n] == snapshotBlocks[n]) {
      n++;
    }
    truncateBlocksTo(n);
    truncateContiguousBlocks(n);
    // Collect the remaining blocks of the file
    while (n < oldBlocks.length) {
      collectedBlocks.addDeleteBlock(oldBlocks[n++]);
@@ -33,7 +33,7 @@ import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.util.Daemon;

@@ -108,15 +108,15 @@ public class LeaseManager {
    for (Long id : getINodeIdWithLeases()) {
      final INodeFile cons = fsnamesystem.getFSDirectory().getInode(id).asFile();
      Preconditions.checkState(cons.isUnderConstruction());
      BlockInfoContiguous[] blocks = cons.getBlocks();
      BlockInfo[] blocks = cons.getBlocks();
      if (blocks == null) {
        continue;
      }
      for (BlockInfoContiguous b : blocks) {
      for (BlockInfo b : blocks) {
        if (!b.isComplete())
          numUCBlocks++;
      }
    }
    LOG.info("Number of blocks under construction: " + numUCBlocks);
    return numUCBlocks;
  }
@@ -64,6 +64,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;

@@ -243,8 +244,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
    //get blockInfo
    Block block = new Block(Block.getBlockId(blockId));
    //find which file this block belongs to
    BlockInfoContiguous blockInfo = namenode.getNamesystem()
        .getStoredBlock(block);
    BlockInfo blockInfo = namenode.getNamesystem().getStoredBlock(block);
    if (blockInfo == null) {
      out.println("Block " + blockId + " " + NONEXISTENT_STATUS);
      LOG.warn("Block " + blockId + " " + NONEXISTENT_STATUS);
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
import org.apache.hadoop.hdfs.util.RwLock;

@@ -45,5 +46,5 @@ public interface Namesystem extends RwLock, SafeMode {

  public void checkOperation(OperationCategory read) throws StandbyException;

  public boolean isInSnapshot(BlockInfoContiguousUnderConstruction blockUC);
  public boolean isInSnapshot(BlockCollection bc);
}
@@ -239,15 +239,16 @@ public class FSImageFormatPBSnapshot {
      FileDiff diff = new FileDiff(pbf.getSnapshotId(), copy, null,
          pbf.getFileSize());
      List<BlockProto> bpl = pbf.getBlocksList();
      // TODO: also persist striped blocks
      // in a file diff there can only be contiguous blocks
      BlockInfoContiguous[] blocks = new BlockInfoContiguous[bpl.size()];
      for (int j = 0, e = bpl.size(); j < e; ++j) {
        Block blk = PBHelper.convert(bpl.get(j));
        BlockInfoContiguous storedBlock =
            (BlockInfoContiguous) fsn.getBlockManager().getStoredBlock(blk);
        if (storedBlock == null) {
          storedBlock = fsn.getBlockManager().addBlockCollection(
              new BlockInfoContiguous(blk, copy.getFileReplication()), file);
          storedBlock = (BlockInfoContiguous) fsn.getBlockManager()
              .addBlockCollection(new BlockInfoContiguous(blk,
                  copy.getFileReplication()), file);
        }
        blocks[j] = storedBlock;
      }
@@ -55,7 +55,9 @@ public class FileDiffList extends
    final FileDiff diff =
        super.saveSelf2Snapshot(latestSnapshotId, iNodeFile, snapshotCopy);
    if (withBlocks) { // Store blocks if this is the first update
      diff.setBlocks(iNodeFile.getBlocks());
      BlockInfoContiguous[] blks = iNodeFile.getContiguousBlocks();
      assert blks != null;
      diff.setBlocks(blks);
    }
  }

@@ -118,7 +120,7 @@ public class FileDiffList extends
        (earlierDiff == null ? new BlockInfoContiguous[]{} : earlierDiff.getBlocks());
    // Find later snapshot (or file itself) with blocks
    BlockInfoContiguous[] laterBlocks = findLaterSnapshotBlocks(removed.getSnapshotId());
    laterBlocks = (laterBlocks == null) ? file.getBlocks() : laterBlocks;
    laterBlocks = (laterBlocks == null) ? file.getContiguousBlocks() : laterBlocks;
    // Skip blocks, which belong to either the earlier or the later lists
    int i = 0;
    for (; i < removedBlocks.length; i++) {
@@ -91,6 +91,10 @@ message INodeSection {
    optional string clientMachine = 2;
  }

  message StripedBlocksFeature {
    repeated StripedBlockProto blocks = 1;
  }

  message AclFeatureProto {
    /**
     * An ACL entry is represented by a 32-bit integer in Big Endian

@@ -139,6 +143,7 @@ message INodeSection {
    optional AclFeatureProto acl = 8;
    optional XAttrFeatureProto xAttrs = 9;
    optional uint32 storagePolicyID = 10;
    optional StripedBlocksFeature stripedBlocks = 11;
  }

  message QuotaByStorageTypeEntryProto {
@@ -490,6 +490,16 @@ message BlockProto {
  optional uint64 numBytes = 3 [default = 0];
}

/**
 * Striped block information. Besides the basic information for a block,
 * it also contains the number of data/parity blocks.
 */
message StripedBlockProto {
  required BlockProto block = 1;
  optional uint32 dataBlockNum = 2;
  optional uint32 parityBlockNum = 3;
}

/**
 * Block and the datanodes where it is located
 */
@@ -108,6 +108,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;

@@ -1609,7 +1610,7 @@ public class DFSTestUtil {
  public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
      ExtendedBlock blk) {
    FSNamesystem fsn = nn.getNamesystem();
    BlockInfoContiguous storedBlock = fsn.getStoredBlock(blk.getLocalBlock());
    BlockInfo storedBlock = fsn.getStoredBlock(blk.getLocalBlock());
    assertTrue("Block " + blk + " should be under construction, " +
        "got: " + storedBlock,
        storedBlock instanceof BlockInfoContiguousUnderConstruction);
@@ -1246,8 +1246,8 @@ public class TestReplicationPolicy {
        (DatanodeStorageInfo.AddBlockResult.ADDED);
    ucBlock.addStorage(storage, ucBlock);

    when(mbc.setLastBlock((BlockInfoContiguous) any(), (DatanodeStorageInfo[]) any()))
        .thenReturn(ucBlock);
    BlockInfo lastBlk = mbc.getLastBlock();
    when(mbc.getLastBlock()).thenReturn(lastBlk, ucBlock);

    bm.convertLastBlockToUnderConstruction(mbc, 0L);
@@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.junit.After;
import org.junit.Before;

@@ -87,21 +87,21 @@ public class TestAddBlock {

    // check file1
    INodeFile file1Node = fsdir.getINode4Write(file1.toString()).asFile();
    BlockInfoContiguous[] file1Blocks = file1Node.getBlocks();
    BlockInfo[] file1Blocks = file1Node.getBlocks();
    assertEquals(1, file1Blocks.length);
    assertEquals(BLOCKSIZE - 1, file1Blocks[0].getNumBytes());
    assertEquals(BlockUCState.COMPLETE, file1Blocks[0].getBlockUCState());

    // check file2
    INodeFile file2Node = fsdir.getINode4Write(file2.toString()).asFile();
    BlockInfoContiguous[] file2Blocks = file2Node.getBlocks();
    BlockInfo[] file2Blocks = file2Node.getBlocks();
    assertEquals(1, file2Blocks.length);
    assertEquals(BLOCKSIZE, file2Blocks[0].getNumBytes());
    assertEquals(BlockUCState.COMPLETE, file2Blocks[0].getBlockUCState());

    // check file3
    INodeFile file3Node = fsdir.getINode4Write(file3.toString()).asFile();
    BlockInfoContiguous[] file3Blocks = file3Node.getBlocks();
    BlockInfo[] file3Blocks = file3Node.getBlocks();
    assertEquals(2, file3Blocks.length);
    assertEquals(BLOCKSIZE, file3Blocks[0].getNumBytes());
    assertEquals(BlockUCState.COMPLETE, file3Blocks[0].getBlockUCState());

@@ -110,7 +110,7 @@ public class TestAddBlock {

    // check file4
    INodeFile file4Node = fsdir.getINode4Write(file4.toString()).asFile();
    BlockInfoContiguous[] file4Blocks = file4Node.getBlocks();
    BlockInfo[] file4Blocks = file4Node.getBlocks();
    assertEquals(2, file4Blocks.length);
    assertEquals(BLOCKSIZE, file4Blocks[0].getNumBytes());
    assertEquals(BlockUCState.COMPLETE, file4Blocks[0].getBlockUCState());

@@ -141,7 +141,7 @@ public class TestAddBlock {
    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();

    INodeFile fileNode = fsdir.getINode4Write(file1.toString()).asFile();
    BlockInfoContiguous[] fileBlocks = fileNode.getBlocks();
    BlockInfo[] fileBlocks = fileNode.getBlocks();
    assertEquals(2, fileBlocks.length);
    assertEquals(BLOCKSIZE, fileBlocks[0].getNumBytes());
    assertEquals(BlockUCState.COMPLETE, fileBlocks[0].getBlockUCState());
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.junit.After;
import org.junit.Before;

@@ -75,7 +76,7 @@ public class TestAddBlockgroup {
    final Path file1 = new Path("/file1");
    DFSTestUtil.createFile(fs, file1, BLOCKSIZE * 2, REPLICATION, 0L);
    INodeFile file1Node = fsdir.getINode4Write(file1.toString()).asFile();
    BlockInfoContiguous[] file1Blocks = file1Node.getBlocks();
    BlockInfo[] file1Blocks = file1Node.getBlocks();
    assertEquals(2, file1Blocks.length);
    assertEquals(GROUP_SIZE, file1Blocks[0].numNodes());
    assertEquals(HdfsConstants.MAX_BLOCKS_IN_GROUP,
@@ -35,7 +35,7 @@ import org.apache.hadoop.hdfs.TestFileCreation;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;

@@ -91,12 +91,12 @@ public class TestBlockUnderConstruction {
        " isUnderConstruction = " + inode.isUnderConstruction() +
        " expected to be " + isFileOpen,
        inode.isUnderConstruction() == isFileOpen);
    BlockInfoContiguous[] blocks = inode.getBlocks();
    BlockInfo[] blocks = inode.getBlocks();
    assertTrue("File does not have blocks: " + inode.toString(),
        blocks != null && blocks.length > 0);

    int idx = 0;
    BlockInfoContiguous curBlock;
    BlockInfo curBlock;
    // all blocks but the last two should be regular blocks
    for (; idx < blocks.length - 2; idx++) {
      curBlock = blocks[idx];
@@ -24,6 +24,7 @@ import java.io.File;
import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.junit.Assert;

import org.apache.hadoop.conf.Configuration;

@@ -39,7 +40,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;

@@ -105,7 +105,7 @@ public class TestFSImage {
    INodeFile file2Node = fsn.dir.getINode4Write(file2.toString()).asFile();
    assertEquals("hello".length(), file2Node.computeFileSize());
    assertTrue(file2Node.isUnderConstruction());
    BlockInfoContiguous[] blks = file2Node.getBlocks();
    BlockInfo[] blks = file2Node.getBlocks();
    assertEquals(1, blks.length);
    assertEquals(BlockUCState.UNDER_CONSTRUCTION, blks[0].getBlockUCState());
    // check lease manager
@@ -54,6 +54,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;

@@ -1035,7 +1036,8 @@ public class TestFileTruncate {
    iip = fsn.getFSDirectory().getINodesInPath(src, true);
    file = iip.getLastINode().asFile();
    file.recordModification(iip.getLatestSnapshotId(), true);
    assertThat(file.isBlockInLatestSnapshot(file.getLastBlock()), is(true));
    assertThat(file.isBlockInLatestSnapshot(
        (BlockInfoContiguous) file.getLastBlock()), is(true));
    initialGenStamp = file.getLastBlock().getGenerationStamp();
    // Test that prepareFileForTruncate sets up copy-on-write truncate
    fsn.writeLock();
@@ -79,7 +79,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;

@@ -885,7 +885,7 @@ public class TestFsck {
    // intentionally corrupt NN data structure
    INodeFile node = (INodeFile) cluster.getNamesystem().dir.getINode
        (fileName, true);
    final BlockInfoContiguous[] blocks = node.getBlocks();
    final BlockInfo[] blocks = node.getBlocks();
    assertEquals(blocks.length, 1);
    blocks[0].setNumBytes(-1L); // set the block length to be negative
@ -36,7 +36,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
|
|||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
|
||||
|
@ -108,14 +108,14 @@ public class TestSnapshotBlocksMap {
|
|||
final FSDirectory dir, final BlockManager blkManager) throws Exception {
|
||||
final INodeFile file = INodeFile.valueOf(dir.getINode(path), path);
|
||||
assertEquals(numBlocks, file.getBlocks().length);
|
||||
for(BlockInfoContiguous b : file.getBlocks()) {
|
||||
for(BlockInfo b : file.getBlocks()) {
|
||||
assertBlockCollection(blkManager, file, b);
|
||||
}
|
||||
return file;
|
||||
}
|
||||
|
||||
static void assertBlockCollection(final BlockManager blkManager,
|
||||
final INodeFile file, final BlockInfoContiguous b) {
|
||||
final INodeFile file, final BlockInfo b) {
|
||||
Assert.assertSame(b, blkManager.getStoredBlock(b));
|
||||
Assert.assertSame(file, blkManager.getBlockCollection(b));
|
||||
Assert.assertSame(file, b.getBlockCollection());
|
||||
|
@ -146,10 +146,10 @@ public class TestSnapshotBlocksMap {
|
|||
{
|
||||
final INodeFile f2 = assertBlockCollection(file2.toString(), 3, fsdir,
|
||||
blockmanager);
|
||||
BlockInfoContiguous[] blocks = f2.getBlocks();
|
||||
BlockInfo[] blocks = f2.getBlocks();
|
||||
hdfs.delete(sub2, true);
|
||||
// The INode should have been removed from the blocksMap
|
||||
for(BlockInfoContiguous b : blocks) {
|
||||
for(BlockInfo b : blocks) {
|
||||
assertNull(blockmanager.getBlockCollection(b));
|
||||
}
|
||||
}
|
||||
|
@ -177,7 +177,7 @@ public class TestSnapshotBlocksMap {
|
|||
// Check the block information for file0
|
||||
final INodeFile f0 = assertBlockCollection(file0.toString(), 4, fsdir,
|
||||
blockmanager);
|
||||
BlockInfoContiguous[] blocks0 = f0.getBlocks();
|
||||
BlockInfo[] blocks0 = f0.getBlocks();
|
||||
|
||||
// Also check the block information for snapshot of file0
|
||||
Path snapshotFile0 = SnapshotTestHelper.getSnapshotPath(sub1, "s0",
|
||||
|
@@ -187,7 +187,7 @@ public class TestSnapshotBlocksMap {
     // Delete file0
     hdfs.delete(file0, true);
     // Make sure the blocks of file0 is still in blocksMap
-    for(BlockInfoContiguous b : blocks0) {
+    for(BlockInfo b : blocks0) {
       assertNotNull(blockmanager.getBlockCollection(b));
     }
     assertBlockCollection(snapshotFile0.toString(), 4, fsdir, blockmanager);
@@ -201,7 +201,7 @@ public class TestSnapshotBlocksMap {
     hdfs.deleteSnapshot(sub1, "s1");

     // Make sure the first block of file0 is still in blocksMap
-    for(BlockInfoContiguous b : blocks0) {
+    for(BlockInfo b : blocks0) {
       assertNotNull(blockmanager.getBlockCollection(b));
     }
     assertBlockCollection(snapshotFile0.toString(), 4, fsdir, blockmanager);
@@ -293,7 +293,7 @@ public class TestSnapshotBlocksMap {
     hdfs.append(bar);

     INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
-    BlockInfoContiguous[] blks = barNode.getBlocks();
+    BlockInfo[] blks = barNode.getBlocks();
     assertEquals(1, blks.length);
     assertEquals(BLOCKSIZE, blks[0].getNumBytes());
     ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
@@ -331,7 +331,7 @@ public class TestSnapshotBlocksMap {
     hdfs.append(bar);

     INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
-    BlockInfoContiguous[] blks = barNode.getBlocks();
+    BlockInfo[] blks = barNode.getBlocks();
     assertEquals(1, blks.length);
     ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
     cluster.getNameNodeRpc()
@@ -370,7 +370,7 @@ public class TestSnapshotBlocksMap {
     hdfs.append(bar);

     INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
-    BlockInfoContiguous[] blks = barNode.getBlocks();
+    BlockInfo[] blks = barNode.getBlocks();
     assertEquals(1, blks.length);
     ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
     cluster.getNameNodeRpc()
@@ -421,7 +421,7 @@ public class TestSnapshotBlocksMap {
     out.write(testData);
     out.close();
     INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
-    BlockInfoContiguous[] blks = barNode.getBlocks();
+    BlockInfo[] blks = barNode.getBlocks();
     assertEquals(1, blks.length);
     assertEquals(testData.length, blks[0].getNumBytes());

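All of the TestSnapshotBlocksMap hunks above exercise one lifetime rule: deleting a file removes its blocks from the blocksMap only when no snapshot still references it. A condensed sketch of the two outcomes, assuming a BlockManager populated by a running test cluster; the class and method names are illustrative:

import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;

import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;

public class SnapshotBlockLifetimeSketch {
  // No snapshot references the deleted file: its blocksMap entries are gone.
  static void checkDropped(BlockManager bm, BlockInfo[] blocks) {
    for (BlockInfo b : blocks) {
      assertNull(bm.getBlockCollection(b));
    }
  }

  // A snapshot still references the file: the entries stay resolvable.
  static void checkRetained(BlockManager bm, BlockInfo[] blocks) {
    for (BlockInfo b : blocks) {
      assertNotNull(bm.getBlockCollection(b));
    }
  }
}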
@@ -42,7 +42,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -262,12 +262,12 @@ public class TestSnapshotDeletion {
     DFSTestUtil.createFile(hdfs, tempFile, BLOCKSIZE, REPLICATION, seed);
     final INodeFile temp = TestSnapshotBlocksMap.assertBlockCollection(
         tempFile.toString(), 1, fsdir, blockmanager);
-    BlockInfoContiguous[] blocks = temp.getBlocks();
+    BlockInfo[] blocks = temp.getBlocks();
     hdfs.delete(tempDir, true);
     // check dir's quota usage
     checkQuotaUsageComputation(dir, 8, BLOCKSIZE * REPLICATION * 3);
     // check blocks of tempFile
-    for (BlockInfoContiguous b : blocks) {
+    for (BlockInfo b : blocks) {
       assertNull(blockmanager.getBlockCollection(b));
     }

@@ -344,7 +344,7 @@ public class TestSnapshotDeletion {
     // while deletion, we add diff for subsub and metaChangeFile1, and remove
     // newFile
     checkQuotaUsageComputation(dir, 9L, BLOCKSIZE * REPLICATION * 4);
-    for (BlockInfoContiguous b : blocks) {
+    for (BlockInfo b : blocks) {
       assertNull(blockmanager.getBlockCollection(b));
     }

@@ -481,7 +481,7 @@ public class TestSnapshotDeletion {

     final INodeFile toDeleteFileNode = TestSnapshotBlocksMap
         .assertBlockCollection(toDeleteFile.toString(), 1, fsdir, blockmanager);
-    BlockInfoContiguous[] blocks = toDeleteFileNode.getBlocks();
+    BlockInfo[] blocks = toDeleteFileNode.getBlocks();

     // create snapshot s0 on dir
     SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
@@ -507,7 +507,7 @@ public class TestSnapshotDeletion {
     // metaChangeDir's diff, dir's diff. diskspace: remove toDeleteFile, and
     // metaChangeFile's replication factor decreases
     checkQuotaUsageComputation(dir, 6, 2 * BLOCKSIZE * REPLICATION - BLOCKSIZE);
-    for (BlockInfoContiguous b : blocks) {
+    for (BlockInfo b : blocks) {
       assertNull(blockmanager.getBlockCollection(b));
     }

@@ -801,7 +801,7 @@ public class TestSnapshotDeletion {
     FileStatus statusBeforeDeletion13 = hdfs.getFileStatus(file13_s1);
     INodeFile file14Node = TestSnapshotBlocksMap.assertBlockCollection(
         file14_s2.toString(), 1, fsdir, blockmanager);
-    BlockInfoContiguous[] blocks_14 = file14Node.getBlocks();
+    BlockInfo[] blocks_14 = file14Node.getBlocks();
     TestSnapshotBlocksMap.assertBlockCollection(file15_s2.toString(), 1, fsdir,
         blockmanager);

@@ -838,7 +838,7 @@ public class TestSnapshotDeletion {
         modDirStr + "file15");
     assertFalse(hdfs.exists(file14_s1));
     assertFalse(hdfs.exists(file15_s1));
-    for (BlockInfoContiguous b : blocks_14) {
+    for (BlockInfo b : blocks_14) {
       assertNull(blockmanager.getBlockCollection(b));
     }

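A note on the quota figures in the TestSnapshotDeletion hunks: the diskspace argument to checkQuotaUsageComputation is the sum over live blocks of block size times replication, which is why dropping one block's replication by one subtracts exactly one BLOCKSIZE. A worked sketch of that arithmetic; the constants here are illustrative, the tests define their own:

public class QuotaArithmeticSketch {
  static final long BLOCKSIZE = 1024;
  static final short REPLICATION = 3;

  public static void main(String[] args) {
    long threeBlocks = 3 * BLOCKSIZE * REPLICATION;               // 9216
    long afterReplDrop = 2 * BLOCKSIZE * REPLICATION - BLOCKSIZE; // 5120
    System.out.println(threeBlocks + " " + afterReplDrop);
  }
}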