Revert 1305590 for HDFS-3089.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1305598 13f79535-47bb-0310-9956-ffa450edef68
parent 1ebcc378af
commit 5e38a9acea
@@ -263,9 +263,6 @@ Release 0.23.3 - UNRELEASED
     HDFS-3071. haadmin failover command does not provide enough detail when
     target NN is not ready to be active. (todd)
 
-    HDFS-3089. Move FSDatasetInterface and the related classes to a package.
-    (szetszwo)
-
   OPTIMIZATIONS
 
     HDFS-3024. Improve performance of stringification in addStoredBlock (todd)

@@ -237,6 +237,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 0;
   public static final String DFS_DATANODE_TRANSFERTO_ALLOWED_KEY = "dfs.datanode.transferTo.allowed";
   public static final boolean DFS_DATANODE_TRANSFERTO_ALLOWED_DEFAULT = true;
+  public static final String DFS_DATANODE_BLOCKVOLUMECHOICEPOLICY = "dfs.datanode.block.volume.choice.policy";
+  public static final String DFS_DATANODE_BLOCKVOLUMECHOICEPOLICY_DEFAULT =
+    "org.apache.hadoop.hdfs.server.datanode.RoundRobinVolumesPolicy";
   public static final String DFS_HEARTBEAT_INTERVAL_KEY = "dfs.heartbeat.interval";
   public static final long DFS_HEARTBEAT_INTERVAL_DEFAULT = 3;
   public static final String DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY = "dfs.namenode.decommission.interval";
@@ -302,7 +305,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   //Keys with no defaults
   public static final String DFS_DATANODE_PLUGINS_KEY = "dfs.datanode.plugins";
   public static final String DFS_DATANODE_FSDATASET_FACTORY_KEY = "dfs.datanode.fsdataset.factory";
-  public static final String DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_POLICY_KEY = "dfs.datanode.fsdataset.volume.choosing.policy";
   public static final String DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY = "dfs.datanode.socket.write.timeout";
   public static final String DFS_DATANODE_STARTUP_KEY = "dfs.datanode.startup";
   public static final String DFS_NAMENODE_PLUGINS_KEY = "dfs.namenode.plugins";

@@ -44,9 +44,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.RollingLogs;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.IOUtils;
 
@@ -74,7 +72,7 @@ class BlockPoolSliceScanner {
   private final AtomicLong lastScanTime = new AtomicLong();
 
   private final DataNode datanode;
-  private final FsDatasetSpi<? extends FsVolumeSpi> dataset;
+  private final FSDatasetInterface<? extends FsVolumeSpi> dataset;
 
   private final SortedSet<BlockScanInfo> blockInfoSet
       = new TreeSet<BlockScanInfo>();
@@ -136,7 +134,8 @@ class BlockPoolSliceScanner {
   }
 
   BlockPoolSliceScanner(String bpid, DataNode datanode,
-      FsDatasetSpi<? extends FsVolumeSpi> dataset, Configuration conf) {
+      FSDatasetInterface<? extends FsVolumeSpi> dataset,
+      Configuration conf) {
     this.datanode = datanode;
     this.dataset = dataset;
    this.blockPoolId = bpid;

@@ -1,46 +1,3 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.datanode.fsdataset;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-
-/**
- * This interface specifies the policy for choosing volumes to store replicas.
- */
-@InterfaceAudience.Private
-public interface VolumeChoosingPolicy<V extends FsVolumeSpi> {
-
-  /**
-   * Choose a volume to place a replica,
-   * given a list of volumes and the replica size sought for storage.
-   *
-   * The implementations of this interface must be thread-safe.
-   *
-   * @param volumes - a list of available volumes.
-   * @param replicaSize - the size of the replica for which a volume is sought.
-   * @return the chosen volume.
-   * @throws IOException when disks are unavailable or are full.
-   */
-  public V chooseVolume(List<V> volumes, long replicaSize) throws IOException;
-}
 /**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file

@@ -31,7 +31,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 
 /**
@@ -44,7 +43,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 public class DataBlockScanner implements Runnable {
   public static final Log LOG = LogFactory.getLog(DataBlockScanner.class);
   private final DataNode datanode;
-  private final FsDatasetSpi<? extends FsVolumeSpi> dataset;
+  private final FSDatasetInterface<? extends FsVolumeSpi> dataset;
   private final Configuration conf;
 
   /**
@@ -56,7 +55,7 @@ public class DataBlockScanner implements Runnable {
   Thread blockScannerThread = null;
 
   DataBlockScanner(DataNode datanode,
-      FsDatasetSpi<? extends FsVolumeSpi> dataset,
+      FSDatasetInterface<? extends FsVolumeSpi> dataset,
       Configuration conf) {
     this.datanode = datanode;
     this.dataset = dataset;

@@ -122,7 +122,6 @@ import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.hdfs.server.datanode.web.resources.DatanodeWebHdfsMethods;
@@ -232,7 +231,7 @@ public class DataNode extends Configured
 
   volatile boolean shouldRun = true;
   private BlockPoolManager blockPoolManager;
-  volatile FsDatasetSpi<? extends FsVolumeSpi> data = null;
+  volatile FSDatasetInterface<? extends FsVolumeSpi> data = null;
   private String clusterId = null;
 
   public final static String EMPTY_DEL_HINT = "";
@@ -810,8 +809,8 @@ public class DataNode extends Configured
    * handshake with the the first namenode is completed.
    */
   private void initStorage(final NamespaceInfo nsInfo) throws IOException {
-    final FsDatasetSpi.Factory<? extends FsDatasetSpi<?>> factory
-        = FsDatasetSpi.Factory.getFactory(conf);
+    final FSDatasetInterface.Factory<? extends FSDatasetInterface<?>> factory
+        = FSDatasetInterface.Factory.getFactory(conf);
 
     if (!factory.isSimulated()) {
       final StartupOption startOpt = getStartupOption(conf);
@@ -829,7 +828,7 @@ public class DataNode extends Configured
 
     synchronized(this) {
       if (data == null) {
-        data = factory.newInstance(this, storage, conf);
+        data = factory.createFSDatasetInterface(this, storage, conf);
       }
     }
   }
@@ -1696,7 +1695,7 @@ public class DataNode extends Configured
    *
    * @return the fsdataset that stores the blocks
    */
-  FsDatasetSpi<?> getFSDataset() {
+  FSDatasetInterface<?> getFSDataset() {
     return data;
   }
 

@@ -43,7 +43,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.util.Daemon;
 
@@ -56,7 +55,7 @@ public class DirectoryScanner implements Runnable {
   private static final Log LOG = LogFactory.getLog(DirectoryScanner.class);
 
   private final DataNode datanode;
-  private final FsDatasetSpi<?> dataset;
+  private final FSDatasetInterface<?> dataset;
   private final ExecutorService reportCompileThreadPool;
   private final ScheduledExecutorService masterThread;
   private final long scanPeriodMsecs;
@@ -220,7 +219,7 @@ public class DirectoryScanner implements Runnable {
     }
   }
 
-  DirectoryScanner(DataNode dn, FsDatasetSpi<?> dataset, Configuration conf) {
+  DirectoryScanner(DataNode dn, FSDatasetInterface<?> dataset, Configuration conf) {
     this.datanode = dn;
     this.dataset = dataset;
     int interval = conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
@@ -412,7 +411,7 @@ public class DirectoryScanner implements Runnable {
   }
 
   /** Is the given volume still valid in the dataset? */
-  private static boolean isValid(final FsDatasetSpi<?> dataset,
+  private static boolean isValid(final FSDatasetInterface<?> dataset,
       final FsVolumeSpi volume) {
     for (FsVolumeSpi vol : dataset.getVolumes()) {
       if (vol == volume) {

@@ -61,14 +61,10 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.RollingLogs;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
@@ -86,13 +82,13 @@ import org.apache.hadoop.util.ReflectionUtils;
  *
  ***************************************************/
 @InterfaceAudience.Private
-public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
+class FSDataset implements FSDatasetInterface<FSDataset.FSVolume> {
   /**
    * A factory for creating FSDataset objects.
    */
-  public static class Factory extends FsDatasetSpi.Factory<FSDataset> {
+  static class Factory extends FSDatasetInterface.Factory<FSDataset> {
     @Override
-    public FSDataset newInstance(DataNode datanode,
+    public FSDataset createFSDatasetInterface(DataNode datanode,
         DataStorage storage, Configuration conf) throws IOException {
       return new FSDataset(datanode, storage, conf);
     }
@@ -827,11 +823,11 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
      */
     private volatile List<FSVolume> volumes = null;
 
-    final VolumeChoosingPolicy<FSVolume> blockChooser;
+    BlockVolumeChoosingPolicy<FSVolume> blockChooser;
     int numFailedVolumes;
 
     FSVolumeSet(List<FSVolume> volumes, int failedVols,
-        VolumeChoosingPolicy<FSVolume> blockChooser) {
+        BlockVolumeChoosingPolicy<FSVolume> blockChooser) {
       this.volumes = Collections.unmodifiableList(volumes);
       this.blockChooser = blockChooser;
       this.numFailedVolumes = failedVols;
@@ -1022,7 +1018,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
     }
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public List<FSVolume> getVolumes() {
     return volumes.volumes;
   }
@@ -1033,7 +1029,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
     return r != null? (FSVolume)r.getVolume(): null;
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public synchronized Block getStoredBlock(String bpid, long blkid)
       throws IOException {
     File blockfile = getFile(bpid, blkid);
@@ -1070,7 +1066,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
     return null;
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
       throws IOException {
     final File meta = getMetaFile(b);
@@ -1129,11 +1125,11 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
     volumeMap = new ReplicasMap(this);
 
    @SuppressWarnings("unchecked")
-    final VolumeChoosingPolicy<FSVolume> blockChooserImpl =
+    final BlockVolumeChoosingPolicy<FSVolume> blockChooserImpl =
        ReflectionUtils.newInstance(conf.getClass(
-            DFSConfigKeys.DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_POLICY_KEY,
-            RoundRobinVolumeChoosingPolicy.class,
-            VolumeChoosingPolicy.class), conf);
+            DFSConfigKeys.DFS_DATANODE_BLOCKVOLUMECHOICEPOLICY,
+            RoundRobinVolumesPolicy.class,
+            BlockVolumeChoosingPolicy.class), conf);
     volumes = new FSVolumeSet(volArray, volsFailed, blockChooserImpl);
     volumes.getVolumeMap(volumeMap);
 
@@ -1168,7 +1164,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
   /**
    * Return true - if there are still valid volumes on the DataNode.
    */
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public boolean hasEnoughResource() {
     return getVolumes().size() >= validVolsRequired;
   }
@@ -1203,7 +1199,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
   /**
    * Find the block's on-disk length
    */
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public long getLength(ExtendedBlock b) throws IOException {
     return getBlockFile(b).length();
   }
@@ -1247,7 +1243,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
     return f;
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public InputStream getBlockInputStream(ExtendedBlock b,
       long seekOffset) throws IOException {
     File blockFile = getBlockFileNoExistsCheck(b);
@@ -1305,7 +1301,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
   /**
    * Returns handles to the block file and its metadata file
    */
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public synchronized ReplicaInputStreams getTmpInputStreams(ExtendedBlock b,
       long blkOffset, long ckoff) throws IOException {
     ReplicaInfo info = getReplicaInfo(b);
@@ -1410,7 +1406,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
   }
 
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public synchronized ReplicaInPipelineInterface append(ExtendedBlock b,
       long newGS, long expectedBlockLen) throws IOException {
     // If the block was successfully finalized because all packets
@@ -1551,7 +1547,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
     return replicaInfo;
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public synchronized ReplicaInPipelineInterface recoverAppend(ExtendedBlock b,
       long newGS, long expectedBlockLen) throws IOException {
     DataNode.LOG.info("Recover failed append to " + b);
@@ -1568,7 +1564,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
     }
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public void recoverClose(ExtendedBlock b, long newGS,
       long expectedBlockLen) throws IOException {
     DataNode.LOG.info("Recover failed close " + b);
@@ -1610,7 +1606,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
     }
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public synchronized ReplicaInPipelineInterface createRbw(ExtendedBlock b)
       throws IOException {
     ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(),
@@ -1630,7 +1626,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
     return newReplicaInfo;
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public synchronized ReplicaInPipelineInterface recoverRbw(ExtendedBlock b,
       long newGS, long minBytesRcvd, long maxBytesRcvd)
       throws IOException {
@@ -1675,7 +1671,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
     return rbw;
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public synchronized ReplicaInPipelineInterface convertTemporaryToRbw(
       final ExtendedBlock b) throws IOException {
     final long blockId = b.getBlockId();
@@ -1736,7 +1732,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
     return rbw;
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public synchronized ReplicaInPipelineInterface createTemporary(ExtendedBlock b)
       throws IOException {
     ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
@@ -1760,7 +1756,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
    * Sets the offset in the meta file so that the
    * last checksum will be overwritten.
    */
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public void adjustCrcChannelPosition(ExtendedBlock b, ReplicaOutputStreams streams,
       int checksumSize) throws IOException {
     FileOutputStream file = (FileOutputStream) streams.getChecksumOut();
@@ -1785,7 +1781,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
   /**
    * Complete the block write!
    */
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public synchronized void finalizeBlock(ExtendedBlock b) throws IOException {
     ReplicaInfo replicaInfo = getReplicaInfo(b);
     if (replicaInfo.getState() == ReplicaState.FINALIZED) {
@@ -1822,7 +1818,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
   /**
    * Remove the temporary block file (if any)
    */
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public synchronized void unfinalizeBlock(ExtendedBlock b) throws IOException {
     ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(),
         b.getLocalBlock());
@@ -1867,7 +1863,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
   /**
    * Generates a block report from the in-memory block map.
    */
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public BlockListAsLongs getBlockReport(String bpid) {
     int size = volumeMap.size(bpid);
     ArrayList<ReplicaInfo> finalized = new ArrayList<ReplicaInfo>(size);
@@ -1918,7 +1914,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
    * Check whether the given block is a valid one.
    * valid means finalized
    */
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public boolean isValidBlock(ExtendedBlock b) {
     return isValid(b, ReplicaState.FINALIZED);
   }
@@ -1926,7 +1922,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
   /**
    * Check whether the given block is a valid RBW.
   */
-  @Override // {@link FsDatasetSpi}
+  @Override // {@link FSDatasetInterface}
   public boolean isValidRbw(final ExtendedBlock b) {
     return isValid(b, ReplicaState.RBW);
   }
@@ -1991,7 +1987,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
    * could lazily garbage-collect the block, but why bother?
    * just get rid of it.
    */
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public void invalidate(String bpid, Block invalidBlks[]) throws IOException {
     boolean error = false;
     for (int i = 0; i < invalidBlks.length; i++) {
@@ -2057,7 +2053,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
     datanode.notifyNamenodeDeletedBlock(block);
   }
 
-  @Override // {@link FsDatasetSpi}
+  @Override // {@link FSDatasetInterface}
   public synchronized boolean contains(final ExtendedBlock block) {
     final long blockId = block.getLocalBlock().getBlockId();
     return getFile(block.getBlockPoolId(), blockId) != null;
@@ -2082,7 +2078,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
    * to these volumes
    * @throws DiskErrorException
    */
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public void checkDataDir() throws DiskErrorException {
     long totalBlocks=0, removedBlocks=0;
     List<FSVolume> failedVols = volumes.checkDirs();
@@ -2126,7 +2122,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
   }
 
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public String toString() {
     return "FSDataset{dirpath='"+volumes+"'}";
   }
@@ -2157,7 +2153,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
     DataNode.LOG.info("Registered FSDatasetState MBean");
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public void shutdown() {
     if (mbeanName != null)
       MBeans.unregister(mbeanName);
@@ -2338,7 +2334,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
   /**
   * @deprecated use {@link #fetchReplicaInfo(String, long)} instead.
   */
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   @Deprecated
   public ReplicaInfo getReplica(String bpid, long blockId) {
     return volumeMap.get(bpid, blockId);
@@ -2350,7 +2346,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
     return r == null? "null": r.toString();
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public synchronized ReplicaRecoveryInfo initReplicaRecovery(
       RecoveringBlock rBlock) throws IOException {
     return initReplicaRecovery(rBlock.getBlock().getBlockPoolId(),
@@ -2423,7 +2419,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
     return rur.createInfo();
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public synchronized String updateReplicaUnderRecovery(
       final ExtendedBlock oldBlock,
       final long recoveryId,
@@ -2505,7 +2501,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
     return finalizeReplica(bpid, rur);
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public synchronized long getReplicaVisibleLength(final ExtendedBlock block)
       throws IOException {
     final Replica replica = getReplicaInfo(block.getBlockPoolId(),
@@ -2588,7 +2584,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
     return info;
   }
 
-  @Override //FsDatasetSpi
+  @Override //FSDatasetInterface
   public synchronized void deleteBlockPool(String bpid, boolean force)
       throws IOException {
     if (!force) {
@@ -2606,7 +2602,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
     }
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block)
       throws IOException {
     File datafile = getBlockFile(block);

@@ -1,379 +1,3 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.datanode.fsdataset;
-
-
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
-import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataStorage;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset;
-import org.apache.hadoop.hdfs.server.datanode.Replica;
-import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface;
-import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
-import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
-import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
-import org.apache.hadoop.util.DiskChecker.DiskErrorException;
-import org.apache.hadoop.util.ReflectionUtils;
-
-/**
- * This is a service provider interface for the underlying storage that
- * stores replicas for a data node.
- * The default implementation stores replicas on local drives.
- */
-@InterfaceAudience.Private
-public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
-  /**
-   * A factory for creating {@link FsDatasetSpi} objects.
-   */
-  public static abstract class Factory<D extends FsDatasetSpi<?>> {
-    /** @return the configured factory. */
-    public static Factory<?> getFactory(Configuration conf) {
-      @SuppressWarnings("rawtypes")
-      final Class<? extends Factory> clazz = conf.getClass(
-          DFSConfigKeys.DFS_DATANODE_FSDATASET_FACTORY_KEY,
-          FSDataset.Factory.class,
-          Factory.class);
-      return ReflectionUtils.newInstance(clazz, conf);
-    }
-
-    /** Create a new object. */
-    public abstract D newInstance(DataNode datanode, DataStorage storage,
-        Configuration conf) throws IOException;
-
-    /** Does the factory create simulated objects? */
-    public boolean isSimulated() {
-      return false;
-    }
-  }
-
-  /**
-   * Create rolling logs.
-   *
-   * @param prefix the prefix of the log names.
-   * @return rolling logs
-   */
-  public RollingLogs createRollingLogs(String bpid, String prefix
-      ) throws IOException;
-
-  /** @return a list of volumes. */
-  public List<V> getVolumes();
-
-  /** @return the volume that contains a replica of the block. */
-  public V getVolume(ExtendedBlock b);
-
-  /** @return a volume information map (name => info). */
-  public Map<String, Object> getVolumeInfoMap();
-
-  /** @return a list of block pools. */
-  public String[] getBlockPoolList();
-
-  /** @return a list of finalized blocks for the given block pool. */
-  public List<Block> getFinalizedBlocks(String bpid);
-
-  /**
-   * Check whether the in-memory block record matches the block on the disk,
-   * and, in case that they are not matched, update the record or mark it
-   * as corrupted.
-   */
-  public void checkAndUpdate(String bpid, long blockId, File diskFile,
-      File diskMetaFile, FsVolumeSpi vol);
-
-  /**
-   * @param b - the block
-   * @return a stream if the meta-data of the block exists;
-   *         otherwise, return null.
-   * @throws IOException
-   */
-  public LengthInputStream getMetaDataInputStream(ExtendedBlock b
-      ) throws IOException;
-
-  /**
-   * Returns the specified block's on-disk length (excluding metadata)
-   * @param b
-   * @return the specified block's on-disk length (excluding metadta)
-   * @throws IOException
-   */
-  public long getLength(ExtendedBlock b) throws IOException;
-
-  /**
-   * Get reference to the replica meta info in the replicasMap.
-   * To be called from methods that are synchronized on {@link FSDataset}
-   * @param blockId
-   * @return replica from the replicas map
-   */
-  @Deprecated
-  public Replica getReplica(String bpid, long blockId);
-
-  /**
-   * @return replica meta information
-   */
-  public String getReplicaString(String bpid, long blockId);
-
-  /**
-   * @return the generation stamp stored with the block.
-   */
-  public Block getStoredBlock(String bpid, long blkid) throws IOException;
-
-  /**
-   * Returns an input stream at specified offset of the specified block
-   * @param b
-   * @param seekOffset
-   * @return an input stream to read the contents of the specified block,
-   *         starting at the offset
-   * @throws IOException
-   */
-  public InputStream getBlockInputStream(ExtendedBlock b, long seekOffset)
-      throws IOException;
-
-  /**
-   * Returns an input stream at specified offset of the specified block
-   * The block is still in the tmp directory and is not finalized
-   * @param b
-   * @param blkoff
-   * @param ckoff
-   * @return an input stream to read the contents of the specified block,
-   *         starting at the offset
-   * @throws IOException
-   */
-  public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
-      long ckoff) throws IOException;
-
-  /**
-   * Creates a temporary replica and returns the meta information of the replica
-   *
-   * @param b block
-   * @return the meta info of the replica which is being written to
-   * @throws IOException if an error occurs
-   */
-  public ReplicaInPipelineInterface createTemporary(ExtendedBlock b
-      ) throws IOException;
-
-  /**
-   * Creates a RBW replica and returns the meta info of the replica
-   *
-   * @param b block
-   * @return the meta info of the replica which is being written to
-   * @throws IOException if an error occurs
-   */
-  public ReplicaInPipelineInterface createRbw(ExtendedBlock b
-      ) throws IOException;
-
-  /**
-   * Recovers a RBW replica and returns the meta info of the replica
-   *
-   * @param b block
-   * @param newGS the new generation stamp for the replica
-   * @param minBytesRcvd the minimum number of bytes that the replica could have
-   * @param maxBytesRcvd the maximum number of bytes that the replica could have
-   * @return the meta info of the replica which is being written to
-   * @throws IOException if an error occurs
-   */
-  public ReplicaInPipelineInterface recoverRbw(ExtendedBlock b,
-      long newGS, long minBytesRcvd, long maxBytesRcvd) throws IOException;
-
-  /**
-   * Covert a temporary replica to a RBW.
-   * @param temporary the temporary replica being converted
-   * @return the result RBW
-   */
-  public ReplicaInPipelineInterface convertTemporaryToRbw(
-      ExtendedBlock temporary) throws IOException;
-
-  /**
-   * Append to a finalized replica and returns the meta info of the replica
-   *
-   * @param b block
-   * @param newGS the new generation stamp for the replica
-   * @param expectedBlockLen the number of bytes the replica is expected to have
-   * @return the meata info of the replica which is being written to
-   * @throws IOException
-   */
-  public ReplicaInPipelineInterface append(ExtendedBlock b, long newGS,
-      long expectedBlockLen) throws IOException;
-
-  /**
-   * Recover a failed append to a finalized replica
-   * and returns the meta info of the replica
-   *
-   * @param b block
-   * @param newGS the new generation stamp for the replica
-   * @param expectedBlockLen the number of bytes the replica is expected to have
-   * @return the meta info of the replica which is being written to
-   * @throws IOException
-   */
-  public ReplicaInPipelineInterface recoverAppend(ExtendedBlock b, long newGS,
-      long expectedBlockLen) throws IOException;
-
-  /**
-   * Recover a failed pipeline close
-   * It bumps the replica's generation stamp and finalize it if RBW replica
-   *
-   * @param b block
-   * @param newGS the new generation stamp for the replica
-   * @param expectedBlockLen the number of bytes the replica is expected to have
-   * @throws IOException
-   */
-  public void recoverClose(ExtendedBlock b, long newGS, long expectedBlockLen
-      ) throws IOException;
-
-  /**
-   * Finalizes the block previously opened for writing using writeToBlock.
-   * The block size is what is in the parameter b and it must match the amount
-   *  of data written
-   * @param b
-   * @throws IOException
-   */
-  public void finalizeBlock(ExtendedBlock b) throws IOException;
-
-  /**
-   * Unfinalizes the block previously opened for writing using writeToBlock.
-   * The temporary file associated with this block is deleted.
-   * @param b
-   * @throws IOException
-   */
-  public void unfinalizeBlock(ExtendedBlock b) throws IOException;
-
-  /**
-   * Returns the block report - the full list of blocks stored under a
-   * block pool
-   * @param bpid Block Pool Id
-   * @return - the block report - the full list of blocks stored
-   */
-  public BlockListAsLongs getBlockReport(String bpid);
-
-  /** Does the dataset contain the block? */
-  public boolean contains(ExtendedBlock block);
-
-  /**
-   * Is the block valid?
-   * @param b
-   * @return - true if the specified block is valid
-   */
-  public boolean isValidBlock(ExtendedBlock b);
-
-  /**
-   * Is the block a valid RBW?
-   * @param b
-   * @return - true if the specified block is a valid RBW
-   */
-  public boolean isValidRbw(ExtendedBlock b);
-
-  /**
-   * Invalidates the specified blocks
-   * @param bpid Block pool Id
-   * @param invalidBlks - the blocks to be invalidated
-   * @throws IOException
-   */
-  public void invalidate(String bpid, Block invalidBlks[]) throws IOException;
-
-  /**
-   * Check if all the data directories are healthy
-   * @throws DiskErrorException
-   */
-  public void checkDataDir() throws DiskErrorException;
-
-  /**
-   * Shutdown the FSDataset
-   */
-  public void shutdown();
-
-  /**
-   * Sets the file pointer of the checksum stream so that the last checksum
-   * will be overwritten
-   * @param b block
-   * @param outs The streams for the data file and checksum file
-   * @param checksumSize number of bytes each checksum has
-   * @throws IOException
-   */
-  public void adjustCrcChannelPosition(ExtendedBlock b,
-      ReplicaOutputStreams outs, int checksumSize) throws IOException;
-
-  /**
-   * Checks how many valid storage volumes there are in the DataNode.
-   * @return true if more than the minimum number of valid volumes are left
-   *         in the FSDataSet.
-   */
-  public boolean hasEnoughResource();
-
-  /**
-   * Get visible length of the specified replica.
-   */
-  long getReplicaVisibleLength(final ExtendedBlock block) throws IOException;
-
-  /**
-   * Initialize a replica recovery.
-   * @return actual state of the replica on this data-node or
-   *         null if data-node does not have the replica.
-   */
-  public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock
-      ) throws IOException;
-
-  /**
-   * Update replica's generation stamp and length and finalize it.
-   * @return the ID of storage that stores the block
-   */
-  public String updateReplicaUnderRecovery(ExtendedBlock oldBlock,
-      long recoveryId, long newLength) throws IOException;
-
-  /**
-   * add new block pool ID
-   * @param bpid Block pool Id
-   * @param conf Configuration
-   */
-  public void addBlockPool(String bpid, Configuration conf) throws IOException;
-
-  /**
-   * Shutdown and remove the block pool from underlying storage.
-   * @param bpid Block pool Id to be removed
-   */
-  public void shutdownBlockPool(String bpid) ;
-
-  /**
-   * Deletes the block pool directories. If force is false, directories are
-   * deleted only if no block files exist for the block pool. If force
-   * is true entire directory for the blockpool is deleted along with its
-   * contents.
-   * @param bpid BlockPool Id to be deleted.
-   * @param force If force is false, directories are deleted only if no
-   *        block files exist for the block pool, otherwise entire
-   *        directory for the blockpool is deleted along with its contents.
-   * @throws IOException
-   */
-  public void deleteBlockPool(String bpid, boolean force) throws IOException;
-
-  /**
-   * Get {@link BlockLocalPathInfo} for the given block.
-   */
-  public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b
-      ) throws IOException;
-}
 /**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file

@@ -25,7 +25,7 @@ import org.apache.hadoop.util.DataChecksum;
 /**
  * This defines the interface of a replica in Pipeline that's being written to
  */
-public interface ReplicaInPipelineInterface extends Replica {
+interface ReplicaInPipelineInterface extends Replica {
   /**
    * Set the number of bytes received
    * @param bytesReceived number of bytes received

@@ -1,70 +1,3 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.datanode.fsdataset;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.Iterator;
-
-/**
- * Rolling logs consist of a current log and a set of previous logs.
- *
- * The implementation should support a single appender and multiple readers.
- */
-public interface RollingLogs {
-  /**
-   * To iterate the lines of the logs.
-   */
-  public interface LineIterator extends Iterator<String>, Closeable {
-    /** Is the iterator iterating the previous? */
-    public boolean isPrevious();
-  }
-
-  /**
-   * To append text to the logs.
-   */
-  public interface Appender extends Appendable, Closeable {
-  }
-
-  /**
-   * Create an iterator to iterate the lines in the logs.
-   *
-   * @param skipPrevious Should it skip reading the previous log?
-   * @return a new iterator.
-   */
-  public LineIterator iterator(boolean skipPrevious) throws IOException;
-
-  /**
-   * @return the only appender to append text to the logs.
-   *         The same object is returned if it is invoked multiple times.
-   */
-  public Appender appender();
-
-  /**
-   * Roll current to previous.
-   *
-   * @return true if the rolling succeeded.
-   *         When it returns false, it is not equivalent to an error.
-   *         It means that the rolling cannot be performed at the moment,
-   *         e.g. the logs are being read.
-   */
-  public boolean roll() throws IOException;
-}
 /**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file

@@ -1,69 +1,3 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.datanode.fsdataset;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
-
-/**
- * Choose volumes in round-robin order.
- */
-public class RoundRobinVolumeChoosingPolicy<V extends FsVolumeSpi>
-    implements VolumeChoosingPolicy<V> {
-
-  private int curVolume = 0;
-
-  @Override
-  public synchronized V chooseVolume(final List<V> volumes, final long blockSize
-      ) throws IOException {
-    if(volumes.size() < 1) {
-      throw new DiskOutOfSpaceException("No more available volumes");
-    }
-
-    // since volumes could've been removed because of the failure
-    // make sure we are not out of bounds
-    if(curVolume >= volumes.size()) {
-      curVolume = 0;
-    }
-
-    int startVolume = curVolume;
-    long maxAvailable = 0;
-
-    while (true) {
-      final V volume = volumes.get(curVolume);
-      curVolume = (curVolume + 1) % volumes.size();
-      long availableVolumeSize = volume.getAvailable();
-      if (availableVolumeSize > blockSize) { return volume; }
-
-      if (availableVolumeSize > maxAvailable) {
-        maxAvailable = availableVolumeSize;
-      }
-
-      if (curVolume == startVolume) {
-        throw new DiskOutOfSpaceException("Out of space: "
-            + "The volume with the most available space (=" + maxAvailable
-            + " B) is less than the block size (=" + blockSize + " B).");
-      }
-    }
-  }
-}
 /**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file

@@ -428,6 +428,15 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.datanode.block.volume.choice.policy</name>
+  <value>org.apache.hadoop.hdfs.server.datanode.RoundRobinVolumesPolicy</value>
+  <description>The policy class to use to determine into which of the
+    datanode's available volumes a block must be written to. Default is a simple
+    round-robin policy that chooses volumes in a cyclic order.
+  </description>
+</property>
+
 <property>
   <name>dfs.heartbeat.interval</name>
   <value>3</value>

@@ -25,8 +25,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.namenode.CreateEditsLog;
 import org.apache.hadoop.net.DNS;
@@ -125,7 +125,7 @@ public class DataNodeCluster {
     } else if (args[i].equals("-simulated")) {
       SimulatedFSDataset.setFactory(conf);
     } else if (args[i].equals("-inject")) {
-      if (!FsDatasetSpi.Factory.getFactory(conf).isSimulated()) {
+      if (!FSDatasetInterface.Factory.getFactory(conf).isSimulated()) {
         System.out.print("-inject is valid only for simulated");
         printUsageExit();
       }
@@ -157,7 +157,8 @@ public class DataNodeCluster {
       System.out.println("No name node address and port in config");
       System.exit(-1);
     }
-    boolean simulated = FsDatasetSpi.Factory.getFactory(conf).isSimulated();
+    boolean simulated =
+        FSDatasetInterface.Factory.getFactory(conf).isSimulated();
     System.out.println("Starting " + numDataNodes +
           (simulated ? " Simulated " : " ") +
           " Data Nodes that will connect to Name Node at " + nameNodeAdr);

@@ -17,29 +17,6 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_FEDERATION_NAMESERVICES;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HOSTS;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
 import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
 
 import java.io.File;
@@ -66,6 +43,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+
 import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.ha.HAServiceProtocolHelper;
 import org.apache.hadoop.ha.ServiceFailedException;
@@ -77,20 +57,21 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetUtils;
@@ -1821,7 +1802,7 @@ public class MiniDFSCluster {
       throw new IndexOutOfBoundsException();
     }
     final DataNode dn = dataNodes.get(dataNodeIndex).datanode;
-    final FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
+    final FSDatasetInterface<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
     if (!(dataSet instanceof SimulatedFSDataset)) {
       throw new IOException("injectBlocks is valid only for SimilatedFSDataset");
     }
@@ -1840,7 +1821,7 @@ public class MiniDFSCluster {
       throw new IndexOutOfBoundsException();
     }
     final DataNode dn = dataNodes.get(dataNodeIndex).datanode;
-    final FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
+    final FSDatasetInterface<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
     if (!(dataSet instanceof SimulatedFSDataset)) {
       throw new IOException("injectBlocks is valid only for SimilatedFSDataset");
     }

@@ -60,8 +60,8 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.io.IOUtils;
@@ -211,7 +211,7 @@ public class TestFileCreation extends junit.framework.TestCase {
     // can't check capacities for real storage since the OS file system may be changing under us.
     if (simulatedStorage) {
       DataNode dn = cluster.getDataNodes().get(0);
-      FsDatasetSpi<?> dataset = DataNodeTestUtils.getFSDataset(dn);
+      FSDatasetInterface<?> dataset = DataNodeTestUtils.getFSDataset(dn);
       assertEquals(fileSize, dataset.getDfsUsed());
       assertEquals(SimulatedFSDataset.DEFAULT_CAPACITY-fileSize,
           dataset.getRemaining());

@@ -24,7 +24,6 @@ import java.io.IOException;
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 
 /**
@@ -49,7 +48,7 @@ public class DataNodeTestUtils {
    *
    * @return the fsdataset that stores the blocks
    */
-  public static FsDatasetSpi<?> getFSDataset(DataNode dn) {
+  public static FSDatasetInterface<?> getFSDataset(DataNode dn) {
     return dn.getFSDataset();
   }
 

@@ -39,12 +39,10 @@ import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolumeSet;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.RollingLogs;
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
@@ -67,10 +65,10 @@ import org.apache.hadoop.util.DiskChecker.DiskErrorException;
  *
  * Note the synchronization is coarse grained - it is at each method.
  */
-public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
-  static class Factory extends FsDatasetSpi.Factory<SimulatedFSDataset> {
+public class SimulatedFSDataset implements FSDatasetInterface<FsVolumeSpi> {
+  static class Factory extends FSDatasetInterface.Factory<SimulatedFSDataset> {
     @Override
-    public SimulatedFSDataset newInstance(DataNode datanode,
+    public SimulatedFSDataset createFSDatasetInterface(DataNode datanode,
         DataStorage storage, Configuration conf) throws IOException {
       return new SimulatedFSDataset(datanode, storage, conf);
     }
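The nested Factory class is how a DataNode picks its dataset implementation at startup: the factory is resolved from configuration, then asked for an instance. A sketch combining the two calls visible in this commit (getFactory appears in the TestSimulatedFSDataset hunk near the end; createFSDatasetInterface is the method restored in the hunk above; storage is assumed to be the DataNode's DataStorage):

    FSDatasetInterface.Factory<?> factory =
        FSDatasetInterface.Factory.getFactory(conf);
    // All three arguments follow the signature restored above.
    FSDatasetInterface<?> dataset =
        factory.createFSDatasetInterface(datanode, storage, conf);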
@@ -429,7 +427,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     return map;
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public synchronized void finalizeBlock(ExtendedBlock b) throws IOException {
     final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
     BInfo binfo = map.get(b.getLocalBlock());
@@ -439,7 +437,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     binfo.finalizeBlock(b.getBlockPoolId(), b.getNumBytes());
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public synchronized void unfinalizeBlock(ExtendedBlock b) {
     if (isValidRbw(b)) {
       blockMap.remove(b.getLocalBlock());
@@ -485,7 +483,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     return storage.getNumFailedVolumes();
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public synchronized long getLength(ExtendedBlock b) throws IOException {
     final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
     BInfo binfo = map.get(b.getLocalBlock());
@@ -515,7 +513,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     return r == null? "null": r.toString();
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public Block getStoredBlock(String bpid, long blkid) throws IOException {
     final Map<Block, BInfo> map = blockMap.get(bpid);
     if (map != null) {
@@ -528,7 +526,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     return null;
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public synchronized void invalidate(String bpid, Block[] invalidBlks)
       throws IOException {
     boolean error = false;
@@ -559,12 +557,12 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     return map == null? null: map.get(b.getLocalBlock());
   }
 
-  @Override // {@link FsDatasetSpi}
+  @Override // {@link FSDatasetInterface}
   public boolean contains(ExtendedBlock block) {
     return getBInfo(block) != null;
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public synchronized boolean isValidBlock(ExtendedBlock b) {
     final BInfo binfo = getBInfo(b);
     return binfo != null && binfo.isFinalized();
@@ -582,7 +580,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     return getStorageInfo();
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public synchronized ReplicaInPipelineInterface append(ExtendedBlock b,
       long newGS, long expectedBlockLen) throws IOException {
     final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
@@ -595,7 +593,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     return binfo;
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public synchronized ReplicaInPipelineInterface recoverAppend(ExtendedBlock b,
       long newGS, long expectedBlockLen) throws IOException {
     final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
@@ -613,7 +611,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     return binfo;
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public void recoverClose(ExtendedBlock b, long newGS, long expectedBlockLen)
       throws IOException {
     final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
@@ -630,7 +628,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     map.put(binfo.theBlock, binfo);
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public synchronized ReplicaInPipelineInterface recoverRbw(ExtendedBlock b,
       long newGS, long minBytesRcvd, long maxBytesRcvd) throws IOException {
     final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
@@ -649,13 +647,13 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     return binfo;
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public synchronized ReplicaInPipelineInterface createRbw(ExtendedBlock b)
       throws IOException {
     return createTemporary(b);
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public synchronized ReplicaInPipelineInterface createTemporary(ExtendedBlock b)
       throws IOException {
     if (isValidBlock(b)) {
@@ -683,7 +681,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     return binfo.getIStream();
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public synchronized InputStream getBlockInputStream(ExtendedBlock b,
       long seekOffset) throws IOException {
     InputStream result = getBlockInputStream(b);
@@ -692,13 +690,13 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   }
 
   /** Not supported */
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
       long ckoff) throws IOException {
     throw new IOException("Not supported");
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public synchronized LengthInputStream getMetaDataInputStream(ExtendedBlock b
       ) throws IOException {
     final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
@@ -719,7 +717,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     // nothing to check for simulated data set
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public synchronized void adjustCrcChannelPosition(ExtendedBlock b,
                                               ReplicaOutputStreams stream,
                                               int checksumSize)
@@ -904,32 +902,32 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
         binfo.isFinalized()?ReplicaState.FINALIZED : ReplicaState.RBW);
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public String updateReplicaUnderRecovery(ExtendedBlock oldBlock,
                                           long recoveryId,
                                           long newlength) {
     return storageId;
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public long getReplicaVisibleLength(ExtendedBlock block) {
     return block.getNumBytes();
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public void addBlockPool(String bpid, Configuration conf) {
     Map<Block, BInfo> map = new HashMap<Block, BInfo>();
     blockMap.put(bpid, map);
     storage.addBlockPool(bpid);
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public void shutdownBlockPool(String bpid) {
     blockMap.remove(bpid);
     storage.removeBlockPool(bpid);
   }
 
-  @Override // FsDatasetSpi
+  @Override // FSDatasetInterface
   public void deleteBlockPool(String bpid, boolean force) {
     return;
   }
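Every override in the simulated dataset carries a trailing comment naming the interface it implements, so the rename has to sweep all of them. The last three methods above also outline the block-pool lifecycle; a hypothetical usage sketch, based only on the signatures visible in this diff:

    FSDatasetInterface<?> dataset = DataNodeTestUtils.getFSDataset(dn);
    String bpid = "BP-1";              // hypothetical block-pool id
    dataset.addBlockPool(bpid, conf);  // create the in-memory map for the pool
    // ... create, write, and finalize blocks in the pool ...
    dataset.shutdownBlockPool(bpid);   // drop the pool's in-memory state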
@@ -33,7 +33,6 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
@@ -78,7 +77,7 @@ public class TestBPOfferService {
   private NNHAStatusHeartbeat[] mockHaStatuses = new NNHAStatusHeartbeat[2];
   private int heartbeatCounts[] = new int[2];
   private DataNode mockDn;
-  private FsDatasetSpi<?> mockFSDataset;
+  private FSDatasetInterface<?> mockFSDataset;
 
   @Before
   public void setupMocks() throws Exception {
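The field type change also reaches the Mockito wiring that populates it in setupMocks. A sketch of how such a mock is typically wired (the real test's stubbing may differ in detail):

    FSDatasetInterface<?> mockFSDataset = Mockito.mock(FSDatasetInterface.class);
    DataNode mockDn = Mockito.mock(DataNode.class);
    // Hand the mocked dataset back whenever the datanode is asked for it.
    Mockito.doReturn(mockFSDataset).when(mockDn).getFSDataset();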
@@ -1,99 +1,3 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.datanode.fsdataset;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.junit.Assert;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-public class TestRoundRobinVolumeChoosingPolicy {
-
-  // Test the Round-Robin block-volume choosing algorithm.
-  @Test
-  public void testRR() throws Exception {
-    final List<FsVolumeSpi> volumes = new ArrayList<FsVolumeSpi>();
-
-    // First volume, with 100 bytes of space.
-    volumes.add(Mockito.mock(FsVolumeSpi.class));
-    Mockito.when(volumes.get(0).getAvailable()).thenReturn(100L);
-
-    // Second volume, with 200 bytes of space.
-    volumes.add(Mockito.mock(FsVolumeSpi.class));
-    Mockito.when(volumes.get(1).getAvailable()).thenReturn(200L);
-
-    @SuppressWarnings("unchecked")
-    final RoundRobinVolumeChoosingPolicy<FsVolumeSpi> policy =
-        ReflectionUtils.newInstance(RoundRobinVolumeChoosingPolicy.class, null);
-
-    // Test two rounds of round-robin choosing
-    Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 0));
-    Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0));
-    Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 0));
-    Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0));
-
-    // The first volume has only 100L space, so the policy should
-    // wisely choose the second one in case we ask for more.
-    Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 150));
-
-    // Fail if no volume can be chosen?
-    try {
-      policy.chooseVolume(volumes, Long.MAX_VALUE);
-      Assert.fail();
-    } catch (IOException e) {
-      // Passed.
-    }
-  }
-
-  // ChooseVolume should throw DiskOutOfSpaceException
-  // with volume and block sizes in exception message.
-  @Test
-  public void testRRPolicyExceptionMessage() throws Exception {
-    final List<FsVolumeSpi> volumes = new ArrayList<FsVolumeSpi>();
-
-    // First volume, with 500 bytes of space.
-    volumes.add(Mockito.mock(FsVolumeSpi.class));
-    Mockito.when(volumes.get(0).getAvailable()).thenReturn(500L);
-
-    // Second volume, with 600 bytes of space.
-    volumes.add(Mockito.mock(FsVolumeSpi.class));
-    Mockito.when(volumes.get(1).getAvailable()).thenReturn(600L);
-
-    final RoundRobinVolumeChoosingPolicy<FsVolumeSpi> policy
-        = new RoundRobinVolumeChoosingPolicy<FsVolumeSpi>();
-    int blockSize = 700;
-    try {
-      policy.chooseVolume(volumes, blockSize);
-      Assert.fail("expected to throw DiskOutOfSpaceException");
-    } catch(DiskOutOfSpaceException e) {
-      Assert.assertEquals("Not returning the expected message",
-          "Out of space: The volume with the most available space (=" + 600
-              + " B) is less than the block size (=" + blockSize + " B).",
-          e.getMessage());
-    }
-  }
-
-}
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
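The deleted test documents the policy's contract: strict rotation across volumes, skipping any volume too small for the requested block, and a DiskOutOfSpaceException naming the largest available volume once a full cycle finds no fit. A sketch of that logic (an illustrative reimplementation written against the test's expectations, not the Hadoop class being removed):

    private int curVolume = 0;

    FsVolumeSpi chooseVolume(List<FsVolumeSpi> volumes, long blockSize)
        throws IOException {
      final int startVolume = curVolume;
      long maxAvailable = 0;
      while (true) {
        final FsVolumeSpi volume = volumes.get(curVolume);
        curVolume = (curVolume + 1) % volumes.size();
        final long available = volume.getAvailable();
        if (available > blockSize) {
          return volume;                 // fits; next call resumes after this volume
        }
        maxAvailable = Math.max(maxAvailable, available);
        if (curVolume == startVolume) {  // full cycle, nothing fits
          throw new DiskOutOfSpaceException("Out of space: The volume with the most "
              + "available space (=" + maxAvailable + " B) is less than the block "
              + "size (=" + blockSize + " B).");
        }
      }
    }

Run against the test's fixtures, this reproduces both cases: volumes of 100 and 200 bytes alternate for zero-byte requests and yield the second volume for a 150-byte request, while volumes of 500 and 600 bytes throw the quoted message for a 700-byte block.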
@@ -29,7 +29,6 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
 import org.apache.hadoop.util.DataChecksum;
 
@@ -89,12 +88,12 @@ public class TestSimulatedFSDataset extends TestCase {
 
   public void testFSDatasetFactory() {
     final Configuration conf = new Configuration();
-    FsDatasetSpi.Factory<?> f = FsDatasetSpi.Factory.getFactory(conf);
+    FSDatasetInterface.Factory<?> f = FSDatasetInterface.Factory.getFactory(conf);
     assertEquals(FSDataset.Factory.class, f.getClass());
     assertFalse(f.isSimulated());
 
     SimulatedFSDataset.setFactory(conf);
-    FsDatasetSpi.Factory<?> s = FsDatasetSpi.Factory.getFactory(conf);
+    FSDatasetInterface.Factory<?> s = FSDatasetInterface.Factory.getFactory(conf);
     assertEquals(SimulatedFSDataset.Factory.class, s.getClass());
     assertTrue(s.isSimulated());
   }
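testFSDatasetFactory pins down both sides of the factory switch: a default configuration resolves to the on-disk FSDataset factory, and SimulatedFSDataset.setFactory flips it to the simulated one. A usage sketch built only from the calls in the hunk above:

    Configuration conf = new Configuration();
    SimulatedFSDataset.setFactory(conf);  // point the config at the simulated factory
    FSDatasetInterface.Factory<?> f = FSDatasetInterface.Factory.getFactory(conf);
    assert f.isSimulated();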