svn merge -c 1242087 from trunk for HDFS-2887.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1242093 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in: parent e9c32d2ad1, commit de87d60e83

@@ -2,6 +2,13 @@ Hadoop HDFS Change Log
Release 0.23.2 - UNRELEASED

  INCOMPATIBLE CHANGES

    HDFS-2887. FSVolume, is a part of FSDatasetInterface implementation, should
    not be referred outside FSDataset. A new FSVolumeInterface is defined.
    The BlockVolumeChoosingPolicy.chooseVolume(..) method signature is also
    updated. (szetszwo)

  NEW FEATURES

  IMPROVEMENTS
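The change log entry above summarizes the refactoring: code outside FSDataset should depend only on the new FSVolumeInterface. As a rough sketch of the calling pattern this enables (the helper class below is hypothetical and not part of this patch), a consumer now asks the dataset for its volumes through the interface, which mirrors how the scanners are rewritten further down:

// Hypothetical helper, for illustration only; it mirrors how the scanners
// below are rewritten to use the interface instead of FSDataset.FSVolume.
package org.apache.hadoop.hdfs.server.datanode;

import java.io.File;
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;

class VolumeDirLister {
  static void printBlockPoolDirs(FSDatasetInterface dataset, String bpid)
      throws IOException {
    final List<FSVolumeInterface> volumes = dataset.getVolumes();
    for (FSVolumeInterface vol : volumes) {
      // Only interface methods are used; no cast back to FSDataset.FSVolume.
      File dir = vol.getDirectory(bpid);
      System.out.println(dir + " (available=" + vol.getAvailable() + " bytes)");
    }
  }
}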
@@ -46,15 +46,14 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.IOUtils;

/**
 * Performs two types of scanning:
 * <li> Gets block files from the data directories and reconciles the
 * difference between the blocks on the disk and in memory in
 * {@link FSDataset}</li>
 * difference between the blocks on the disk and in memory.</li>
 * <li> Scans the data directories for block files under a block pool
 * and verifies that the files are not corrupt</li>
 * This keeps track of blocks and their last verification times.

@@ -78,7 +77,7 @@ class BlockPoolSliceScanner {

  private long scanPeriod = DEFAULT_SCAN_PERIOD_HOURS * 3600 * 1000;
  private DataNode datanode;
  private FSDataset dataset;
  private final FSDatasetInterface dataset;

  // sorted set
  private TreeSet<BlockScanInfo> blockInfoSet;

@@ -137,8 +136,8 @@ class BlockPoolSliceScanner {
    }
  }

  BlockPoolSliceScanner(DataNode datanode, FSDataset dataset, Configuration conf,
      String bpid) {
  BlockPoolSliceScanner(DataNode datanode, FSDatasetInterface dataset,
      Configuration conf, String bpid) {
    this.datanode = datanode;
    this.dataset = dataset;
    this.blockPoolId = bpid;

@@ -220,16 +219,16 @@ class BlockPoolSliceScanner {
     * otherwise, pick the first directory.
     */
    File dir = null;
    List<FSVolume> volumes = dataset.volumes.getVolumes();
    for (FSDataset.FSVolume vol : dataset.volumes.getVolumes()) {
      File bpDir = vol.getBlockPoolSlice(blockPoolId).getDirectory();
    List<FSVolumeInterface> volumes = dataset.getVolumes();
    for (FSVolumeInterface vol : volumes) {
      File bpDir = vol.getDirectory(blockPoolId);
      if (LogFileHandler.isFilePresent(bpDir, verificationLogFile)) {
        dir = bpDir;
        break;
      }
    }
    if (dir == null) {
      dir = volumes.get(0).getBlockPoolSlice(blockPoolId).getDirectory();
      dir = volumes.get(0).getDirectory(blockPoolId);
    }

    try {

@@ -577,8 +576,8 @@ class BlockPoolSliceScanner {
    bytesLeft += len;
  }

  static File getCurrentFile(FSVolume vol, String bpid) throws IOException {
    return LogFileHandler.getCurrentFile(vol.getBlockPoolSlice(bpid).getDirectory(),
  static File getCurrentFile(FSVolumeInterface vol, String bpid) throws IOException {
    return LogFileHandler.getCurrentFile(vol.getDirectory(bpid),
        BlockPoolSliceScanner.verificationLogFile);
  }
@@ -22,7 +22,7 @@ import java.util.List;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;

/**************************************************
 * BlockVolumeChoosingPolicy allows a DataNode to

@@ -46,7 +46,7 @@ public interface BlockVolumeChoosingPolicy {
   * @return the chosen volume to store the block.
   * @throws IOException when disks are unavailable or are full.
   */
  public FSVolume chooseVolume(List<FSVolume> volumes, long blockSize)
  public FSVolumeInterface chooseVolume(List<FSVolumeInterface> volumes, long blockSize)
    throws IOException;

}
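The updated signature above accepts and returns interface types only. A hypothetical first-fit policy written against the new signature (not part of this patch; RoundRobinVolumesPolicy further down is the real implementation) would look roughly like this:

// Illustrative sketch only: a trivial first-fit BlockVolumeChoosingPolicy
// implemented purely in terms of FSVolumeInterface.
package org.apache.hadoop.hdfs.server.datanode;

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;

public class FirstFitVolumesPolicy implements BlockVolumeChoosingPolicy {
  @Override
  public synchronized FSVolumeInterface chooseVolume(
      List<FSVolumeInterface> volumes, long blockSize) throws IOException {
    for (FSVolumeInterface volume : volumes) {
      // getAvailable() is declared on the new FSVolumeInterface.
      if (volume.getAvailable() > blockSize) {
        return volume;
      }
    }
    throw new DiskOutOfSpaceException(
        "No volume has " + blockSize + " bytes available");
  }
}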
@@ -27,12 +27,12 @@ import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

/**
 * DataBlockScanner manages block scanning for all the block pools. For each

@@ -44,7 +44,7 @@ import org.apache.commons.logging.LogFactory;
public class DataBlockScanner implements Runnable {
  public static final Log LOG = LogFactory.getLog(DataBlockScanner.class);
  private final DataNode datanode;
  private final FSDataset dataset;
  private final FSDatasetInterface dataset;
  private final Configuration conf;

  /**

@@ -55,7 +55,7 @@ public class DataBlockScanner implements Runnable {
      new TreeMap<String, BlockPoolSliceScanner>();
  Thread blockScannerThread = null;

  DataBlockScanner(DataNode datanode, FSDataset dataset, Configuration conf) {
  DataBlockScanner(DataNode datanode, FSDatasetInterface dataset, Configuration conf) {
    this.datanode = datanode;
    this.dataset = dataset;
    this.conf = conf;

@@ -135,7 +135,7 @@ public class DataBlockScanner implements Runnable {
        .iterator();
    while (bpidIterator.hasNext()) {
      String bpid = bpidIterator.next();
      for (FSDataset.FSVolume vol : dataset.volumes.getVolumes()) {
      for (FSDatasetInterface.FSVolumeInterface vol : dataset.getVolumes()) {
        try {
          File currFile = BlockPoolSliceScanner.getCurrentFile(vol, bpid);
          if (currFile.exists()) {
@@ -117,7 +117,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.VolumeInfo;
import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
import org.apache.hadoop.hdfs.server.datanode.web.resources.DatanodeWebHdfsMethods;

@@ -556,11 +555,11 @@ public class DataNode extends Configured
    if (conf.getInt(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,
                    DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT) < 0) {
      reason = "verification is turned off by configuration";
    } else if (!(data instanceof FSDataset)) {
      reason = "verifcation is supported only with FSDataset";
    } else if ("SimulatedFSDataset".equals(data.getClass().getSimpleName())) {
      reason = "verifcation is not supported by SimulatedFSDataset";
    }
    if (reason == null) {
      blockScanner = new DataBlockScanner(this, (FSDataset)data, conf);
      blockScanner = new DataBlockScanner(this, data, conf);
      blockScanner.start();
    } else {
      LOG.info("Periodic Block Verification scan is disabled because " +

@@ -585,11 +584,11 @@ public class DataNode extends Configured
    if (conf.getInt(DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
                    DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT) < 0) {
      reason = "verification is turned off by configuration";
    } else if (!(data instanceof FSDataset)) {
      reason = "verification is supported only with FSDataset";
    } else if ("SimulatedFSDataset".equals(data.getClass().getSimpleName())) {
      reason = "verifcation is not supported by SimulatedFSDataset";
    }
    if (reason == null) {
      directoryScanner = new DirectoryScanner(this, (FSDataset) data, conf);
      directoryScanner = new DirectoryScanner(this, data, conf);
      directoryScanner.start();
    } else {
      LOG.info("Periodic Directory Tree Verification scan is disabled because " +

@@ -2200,16 +2199,7 @@ public class DataNode extends Configured
   */
  @Override // DataNodeMXBean
  public String getVolumeInfo() {
    final Map<String, Object> info = new HashMap<String, Object>();
    Collection<VolumeInfo> volumes = ((FSDataset)this.data).getVolumeInfo();
    for (VolumeInfo v : volumes) {
      final Map<String, Object> innerInfo = new HashMap<String, Object>();
      innerInfo.put("usedSpace", v.usedSpace);
      innerInfo.put("freeSpace", v.freeSpace);
      innerInfo.put("reservedSpace", v.reservedSpace);
      info.put(v.directory, innerInfo);
    }
    return JSON.toString(info);
    return JSON.toString(data.getVolumeInfoMap());
  }

  @Override // DataNodeMXBean
@@ -751,7 +751,7 @@ public class DataStorage extends Storage {
    Matcher matcher = PRE_GENSTAMP_META_FILE_PATTERN.matcher(oldFileName);
    if (matcher.matches()) {
      //return the current metadata file name
      return FSDataset.getMetaFileName(matcher.group(1),
      return DatanodeUtil.getMetaFileName(matcher.group(1),
          GenerationStamp.GRANDFATHER_GENERATION_STAMP);
    }
    return oldFileName;
@@ -18,7 +18,9 @@
package org.apache.hadoop.hdfs.server.datanode;

import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.util.Arrays;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.Block;

@@ -26,6 +28,10 @@ import org.apache.hadoop.hdfs.protocol.Block;
/** Provide utility methods for Datanode. */
@InterfaceAudience.Private
class DatanodeUtil {
  static final String METADATA_EXTENSION = ".meta";

  static final String UNLINK_BLOCK_SUFFIX = ".unlinked";

  private final static String DISK_ERROR = "Possible disk error on file creation: ";

  /** Get the cause of an I/O exception if caused by a possible disk error

@@ -64,4 +70,37 @@ class DatanodeUtil {
    }
    return f;
  }

  static String getMetaFileName(String blockFileName, long genStamp) {
    return blockFileName + "_" + genStamp + METADATA_EXTENSION;
  }

  static File getMetaFile(File f, long genStamp) {
    return new File(getMetaFileName(f.getAbsolutePath(), genStamp));
  }

  /** Find the corresponding meta data file from a given block file */
  static File findMetaFile(final File blockFile) throws IOException {
    final String prefix = blockFile.getName() + "_";
    final File parent = blockFile.getParentFile();
    File[] matches = parent.listFiles(new FilenameFilter() {
      public boolean accept(File dir, String name) {
        return dir.equals(parent)
            && name.startsWith(prefix) && name.endsWith(METADATA_EXTENSION);
      }
    });

    if (matches == null || matches.length == 0) {
      throw new IOException("Meta file not found, blockFile=" + blockFile);
    }
    else if (matches.length > 1) {
      throw new IOException("Found more than one meta files: "
          + Arrays.asList(matches));
    }
    return matches[0];
  }

  static File getUnlinkTmpFile(File f) {
    return new File(f.getParentFile(), f.getName()+UNLINK_BLOCK_SUFFIX);
  }
}
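The helpers moved into DatanodeUtil encode the block metadata naming convention: block file name, underscore, generation stamp, ".meta" extension. A small illustrative snippet, with an invented block path and generation stamp:

// Illustration only; the path and generation stamp below are made up.
File blockFile = new File("/data/dfs/current/blk_1073741825");
File metaFile = DatanodeUtil.getMetaFile(blockFile, 1001L);
// metaFile is /data/dfs/current/blk_1073741825_1001.meta
// findMetaFile(blockFile) locates the same file by scanning the parent
// directory for names matching blk_1073741825_*.meta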
@@ -43,20 +43,19 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.util.Daemon;

/**
 * Periodically scans the data directories for block and block metadata files.
 * Reconciles the differences with block information maintained in
 * {@link FSDataset}
 * Reconciles the differences with block information maintained in the dataset.
 */
@InterfaceAudience.Private
public class DirectoryScanner implements Runnable {
  private static final Log LOG = LogFactory.getLog(DirectoryScanner.class);

  private final DataNode datanode;
  private final FSDataset dataset;
  private final FSDatasetInterface dataset;
  private final ExecutorService reportCompileThreadPool;
  private final ScheduledExecutorService masterThread;
  private final long scanPeriodMsecs;

@@ -158,13 +157,13 @@ public class DirectoryScanner implements Runnable {
    private final long blockId;
    private final File metaFile;
    private final File blockFile;
    private final FSVolume volume;
    private final FSVolumeInterface volume;

    ScanInfo(long blockId) {
      this(blockId, null, null, null);
    }

    ScanInfo(long blockId, File blockFile, File metaFile, FSVolume vol) {
    ScanInfo(long blockId, File blockFile, File metaFile, FSVolumeInterface vol) {
      this.blockId = blockId;
      this.metaFile = metaFile;
      this.blockFile = blockFile;

@@ -183,7 +182,7 @@ public class DirectoryScanner implements Runnable {
      return blockId;
    }

    FSVolume getVolume() {
    FSVolumeInterface getVolume() {
      return volume;
    }

@@ -220,7 +219,7 @@ public class DirectoryScanner implements Runnable {
    }
  }

  DirectoryScanner(DataNode dn, FSDataset dataset, Configuration conf) {
  DirectoryScanner(DataNode dn, FSDatasetInterface dataset, Configuration conf) {
    this.datanode = dn;
    this.dataset = dataset;
    int interval = conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,

@@ -269,7 +268,7 @@ public class DirectoryScanner implements Runnable {
      return;
    }

    String[] bpids = dataset.getBPIdlist();
    String[] bpids = dataset.getBlockPoolList();
    for(String bpid : bpids) {
      UpgradeManagerDatanode um =
        datanode.getUpgradeManagerDatanode(bpid);

@@ -411,17 +410,29 @@ public class DirectoryScanner implements Runnable {
    diffRecord.add(new ScanInfo(blockId));
  }

  /** Is the given volume still valid in the dataset? */
  private static boolean isValid(final FSDatasetInterface dataset,
      final FSVolumeInterface volume) {
    for (FSVolumeInterface vol : dataset.getVolumes()) {
      if (vol == volume) {
        return true;
      }
    }
    return false;
  }

  /** Get lists of blocks on the disk sorted by blockId, per blockpool */
  private Map<String, ScanInfo[]> getDiskReport() {
    // First get list of data directories
    List<FSVolume> volumes = dataset.volumes.getVolumes();
    final List<FSVolumeInterface> volumes = dataset.getVolumes();
    ArrayList<ScanInfoPerBlockPool> dirReports =
      new ArrayList<ScanInfoPerBlockPool>(volumes.size());

    Map<Integer, Future<ScanInfoPerBlockPool>> compilersInProgress =
      new HashMap<Integer, Future<ScanInfoPerBlockPool>>();
    for (int i = 0; i < volumes.size(); i++) {
      if (!dataset.volumes.isValid(volumes.get(i))) { // volume is still valid
      if (!isValid(dataset, volumes.get(i))) {
        // volume is invalid
        dirReports.add(i, null);
      } else {
        ReportCompiler reportCompiler =

@@ -446,7 +457,8 @@ public class DirectoryScanner implements Runnable {
    // Compile consolidated report for all the volumes
    ScanInfoPerBlockPool list = new ScanInfoPerBlockPool();
    for (int i = 0; i < volumes.size(); i++) {
      if (dataset.volumes.isValid(volumes.get(i))) { // volume is still valid
      if (isValid(dataset, volumes.get(i))) {
        // volume is still valid
        list.addAll(dirReports.get(i));
      }
    }

@@ -461,9 +473,9 @@ public class DirectoryScanner implements Runnable {

  private static class ReportCompiler
  implements Callable<ScanInfoPerBlockPool> {
    private FSVolume volume;
    private FSVolumeInterface volume;

    public ReportCompiler(FSVolume volume) {
    public ReportCompiler(FSVolumeInterface volume) {
      this.volume = volume;
    }

@@ -473,14 +485,14 @@ public class DirectoryScanner implements Runnable {
      ScanInfoPerBlockPool result = new ScanInfoPerBlockPool(bpList.length);
      for (String bpid : bpList) {
        LinkedList<ScanInfo> report = new LinkedList<ScanInfo>();
        File bpFinalizedDir = volume.getBlockPoolSlice(bpid).getFinalizedDir();
        File bpFinalizedDir = volume.getFinalizedDir(bpid);
        result.put(bpid, compileReport(volume, bpFinalizedDir, report));
      }
      return result;
    }

    /** Compile list {@link ScanInfo} for the blocks in the directory <dir> */
    private LinkedList<ScanInfo> compileReport(FSVolume vol, File dir,
    private LinkedList<ScanInfo> compileReport(FSVolumeInterface vol, File dir,
        LinkedList<ScanInfo> report) {
      File[] files;
      try {
@ -23,7 +23,6 @@ import java.io.File;
|
|||
import java.io.FileInputStream;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.FilenameFilter;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.RandomAccessFile;
|
||||
|
@ -82,14 +81,13 @@ class FSDataset implements FSDatasetInterface {
|
|||
* A node type that can be built into a tree reflecting the
|
||||
* hierarchy of blocks on the local disk.
|
||||
*/
|
||||
class FSDir {
|
||||
File dir;
|
||||
private class FSDir {
|
||||
final File dir;
|
||||
int numBlocks = 0;
|
||||
FSDir children[];
|
||||
int lastChildIdx = 0;
|
||||
/**
|
||||
*/
|
||||
public FSDir(File dir)
|
||||
|
||||
private FSDir(File dir)
|
||||
throws IOException {
|
||||
this.dir = dir;
|
||||
this.children = null;
|
||||
|
@ -114,7 +112,7 @@ class FSDataset implements FSDatasetInterface {
|
|||
}
|
||||
}
|
||||
|
||||
public File addBlock(Block b, File src) throws IOException {
|
||||
private File addBlock(Block b, File src) throws IOException {
|
||||
//First try without creating subdirectories
|
||||
File file = addBlock(b, src, false, false);
|
||||
return (file != null) ? file : addBlock(b, src, true, true);
|
||||
|
@ -162,7 +160,7 @@ class FSDataset implements FSDatasetInterface {
|
|||
return children[ lastChildIdx ].addBlock(b, src, true, false);
|
||||
}
|
||||
|
||||
void getVolumeMap(String bpid, ReplicasMap volumeMap, FSVolume volume)
|
||||
private void getVolumeMap(String bpid, ReplicasMap volumeMap, FSVolume volume)
|
||||
throws IOException {
|
||||
if (children != null) {
|
||||
for (int i = 0; i < children.length; i++) {
|
||||
|
@ -208,7 +206,7 @@ class FSDataset implements FSDatasetInterface {
|
|||
* check if a data diretory is healthy
|
||||
* @throws DiskErrorException
|
||||
*/
|
||||
public void checkDirTree() throws DiskErrorException {
|
||||
private void checkDirTree() throws DiskErrorException {
|
||||
DiskChecker.checkDir(dir);
|
||||
|
||||
if (children != null) {
|
||||
|
@ -218,7 +216,7 @@ class FSDataset implements FSDatasetInterface {
|
|||
}
|
||||
}
|
||||
|
||||
void clearPath(File f) {
|
||||
private void clearPath(File f) {
|
||||
String root = dir.getAbsolutePath();
|
||||
String dir = f.getAbsolutePath();
|
||||
if (dir.startsWith(root)) {
|
||||
|
@ -271,7 +269,8 @@ class FSDataset implements FSDatasetInterface {
|
|||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "FSDir{" +
|
||||
"dir=" + dir +
|
||||
|
@ -285,7 +284,7 @@ class FSDataset implements FSDatasetInterface {
|
|||
* Taken together, all BlockPoolSlices sharing a block pool ID across a
|
||||
* cluster represent a single block pool.
|
||||
*/
|
||||
class BlockPoolSlice {
|
||||
private class BlockPoolSlice {
|
||||
private final String bpid;
|
||||
private final FSVolume volume; // volume to which this BlockPool belongs to
|
||||
private final File currentDir; // StorageDirectory/current/bpid/current
|
||||
|
@ -343,11 +342,7 @@ class FSDataset implements FSDatasetInterface {
|
|||
File getDirectory() {
|
||||
return currentDir.getParentFile();
|
||||
}
|
||||
|
||||
File getCurrentDir() {
|
||||
return currentDir;
|
||||
}
|
||||
|
||||
|
||||
File getFinalizedDir() {
|
||||
return finalizedDir.dir;
|
||||
}
|
||||
|
@ -388,7 +383,7 @@ class FSDataset implements FSDatasetInterface {
|
|||
|
||||
File addBlock(Block b, File f) throws IOException {
|
||||
File blockFile = finalizedDir.addBlock(b, f);
|
||||
File metaFile = getMetaFile(blockFile , b.getGenerationStamp());
|
||||
File metaFile = DatanodeUtil.getMetaFile(blockFile, b.getGenerationStamp());
|
||||
dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length());
|
||||
return blockFile;
|
||||
}
|
||||
|
@ -456,7 +451,7 @@ class FSDataset implements FSDatasetInterface {
|
|||
DataInputStream checksumIn = null;
|
||||
InputStream blockIn = null;
|
||||
try {
|
||||
File metaFile = new File(getMetaFileName(blockFile.toString(), genStamp));
|
||||
final File metaFile = DatanodeUtil.getMetaFile(blockFile, genStamp);
|
||||
long blockFileLen = blockFile.length();
|
||||
long metaFileLen = metaFile.length();
|
||||
int crcHeaderLen = DataChecksum.getChecksumHeaderSize();
|
||||
|
@ -522,7 +517,7 @@ class FSDataset implements FSDatasetInterface {
|
|||
}
|
||||
}
|
||||
|
||||
class FSVolume {
|
||||
class FSVolume implements FSVolumeInterface {
|
||||
private final Map<String, BlockPoolSlice> map = new HashMap<String, BlockPoolSlice>();
|
||||
private final File currentDir; // <StorageDirectory>/current
|
||||
private final DF usage;
|
||||
|
@ -535,11 +530,6 @@ class FSDataset implements FSDatasetInterface {
|
|||
File parent = currentDir.getParentFile();
|
||||
this.usage = new DF(parent, conf);
|
||||
}
|
||||
|
||||
/** Return storage directory corresponding to the volume */
|
||||
File getDir() {
|
||||
return currentDir.getParentFile();
|
||||
}
|
||||
|
||||
File getCurrentDir() {
|
||||
return currentDir;
|
||||
|
@ -584,8 +574,9 @@ class FSDataset implements FSDatasetInterface {
|
|||
long remaining = usage.getCapacity() - reserved;
|
||||
return remaining > 0 ? remaining : 0;
|
||||
}
|
||||
|
||||
long getAvailable() throws IOException {
|
||||
|
||||
@Override
|
||||
public long getAvailable() throws IOException {
|
||||
long remaining = getCapacity()-getDfsUsed();
|
||||
long available = usage.getAvailable();
|
||||
if (remaining>available) {
|
||||
|
@ -601,19 +592,30 @@ class FSDataset implements FSDatasetInterface {
|
|||
String getMount() throws IOException {
|
||||
return usage.getMount();
|
||||
}
|
||||
|
||||
BlockPoolSlice getBlockPoolSlice(String bpid) throws IOException {
|
||||
|
||||
private BlockPoolSlice getBlockPoolSlice(String bpid) throws IOException {
|
||||
BlockPoolSlice bp = map.get(bpid);
|
||||
if (bp == null) {
|
||||
throw new IOException("block pool " + bpid + " is not found");
|
||||
}
|
||||
return bp;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public File getDirectory(String bpid) throws IOException {
|
||||
return getBlockPoolSlice(bpid).getDirectory();
|
||||
}
|
||||
|
||||
@Override
|
||||
public File getFinalizedDir(String bpid) throws IOException {
|
||||
return getBlockPoolSlice(bpid).getFinalizedDir();
|
||||
}
|
||||
|
||||
/**
|
||||
* Make a deep copy of the list of currently active BPIDs
|
||||
*/
|
||||
String[] getBlockPoolList() {
|
||||
@Override
|
||||
public String[] getBlockPoolList() {
|
||||
synchronized(FSDataset.this) {
|
||||
return map.keySet().toArray(new String[map.keySet().size()]);
|
||||
}
|
||||
|
@ -682,7 +684,8 @@ class FSDataset implements FSDatasetInterface {
|
|||
BlockPoolSlice bp = getBlockPoolSlice(bpid);
|
||||
bp.clearPath(f);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return currentDir.getAbsolutePath();
|
||||
}
|
||||
|
@ -774,21 +777,18 @@ class FSDataset implements FSDatasetInterface {
|
|||
* Read access to this unmodifiable list is not synchronized.
|
||||
* This list is replaced on modification holding "this" lock.
|
||||
*/
|
||||
private volatile List<FSVolume> volumes = null;
|
||||
private volatile List<FSVolumeInterface> volumes = null;
|
||||
|
||||
BlockVolumeChoosingPolicy blockChooser;
|
||||
int numFailedVolumes;
|
||||
|
||||
FSVolumeSet(FSVolume[] volumes, int failedVols, BlockVolumeChoosingPolicy blockChooser) {
|
||||
List<FSVolume> list = Arrays.asList(volumes);
|
||||
this.volumes = Collections.unmodifiableList(list);
|
||||
FSVolumeSet(List<FSVolumeInterface> volumes, int failedVols,
|
||||
BlockVolumeChoosingPolicy blockChooser) {
|
||||
this.volumes = Collections.unmodifiableList(volumes);
|
||||
this.blockChooser = blockChooser;
|
||||
this.numFailedVolumes = failedVols;
|
||||
}
|
||||
|
||||
private int numberOfVolumes() {
|
||||
return volumes.size();
|
||||
}
|
||||
|
||||
private int numberOfFailedVolumes() {
|
||||
return numFailedVolumes;
|
||||
}
|
||||
|
@ -801,36 +801,36 @@ class FSDataset implements FSDatasetInterface {
|
|||
* @return next volume to store the block in.
|
||||
*/
|
||||
synchronized FSVolume getNextVolume(long blockSize) throws IOException {
|
||||
return blockChooser.chooseVolume(volumes, blockSize);
|
||||
return (FSVolume)blockChooser.chooseVolume(volumes, blockSize);
|
||||
}
|
||||
|
||||
private long getDfsUsed() throws IOException {
|
||||
long dfsUsed = 0L;
|
||||
for (FSVolume vol : volumes) {
|
||||
dfsUsed += vol.getDfsUsed();
|
||||
for (FSVolumeInterface v : volumes) {
|
||||
dfsUsed += ((FSVolume)v).getDfsUsed();
|
||||
}
|
||||
return dfsUsed;
|
||||
}
|
||||
|
||||
private long getBlockPoolUsed(String bpid) throws IOException {
|
||||
long dfsUsed = 0L;
|
||||
for (FSVolume vol : volumes) {
|
||||
dfsUsed += vol.getBlockPoolUsed(bpid);
|
||||
for (FSVolumeInterface v : volumes) {
|
||||
dfsUsed += ((FSVolume)v).getBlockPoolUsed(bpid);
|
||||
}
|
||||
return dfsUsed;
|
||||
}
|
||||
|
||||
private long getCapacity() throws IOException {
|
||||
long capacity = 0L;
|
||||
for (FSVolume vol : volumes) {
|
||||
capacity += vol.getCapacity();
|
||||
for (FSVolumeInterface v : volumes) {
|
||||
capacity += ((FSVolume)v).getCapacity();
|
||||
}
|
||||
return capacity;
|
||||
}
|
||||
|
||||
private long getRemaining() throws IOException {
|
||||
long remaining = 0L;
|
||||
for (FSVolume vol : volumes) {
|
||||
for (FSVolumeInterface vol : volumes) {
|
||||
remaining += vol.getAvailable();
|
||||
}
|
||||
return remaining;
|
||||
|
@ -838,15 +838,15 @@ class FSDataset implements FSDatasetInterface {
|
|||
|
||||
private void getVolumeMap(ReplicasMap volumeMap)
|
||||
throws IOException {
|
||||
for (FSVolume vol : volumes) {
|
||||
vol.getVolumeMap(volumeMap);
|
||||
for (FSVolumeInterface v : volumes) {
|
||||
((FSVolume)v).getVolumeMap(volumeMap);
|
||||
}
|
||||
}
|
||||
|
||||
private void getVolumeMap(String bpid, ReplicasMap volumeMap)
|
||||
throws IOException {
|
||||
for (FSVolume vol : volumes) {
|
||||
vol.getVolumeMap(bpid, volumeMap);
|
||||
for (FSVolumeInterface v : volumes) {
|
||||
((FSVolume)v).getVolumeMap(bpid, volumeMap);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -862,10 +862,10 @@ class FSDataset implements FSDatasetInterface {
|
|||
ArrayList<FSVolume> removedVols = null;
|
||||
|
||||
// Make a copy of volumes for performing modification
|
||||
List<FSVolume> volumeList = new ArrayList<FSVolume>(getVolumes());
|
||||
final List<FSVolumeInterface> volumeList = new ArrayList<FSVolumeInterface>(volumes);
|
||||
|
||||
for (int idx = 0; idx < volumeList.size(); idx++) {
|
||||
FSVolume fsv = volumeList.get(idx);
|
||||
FSVolume fsv = (FSVolume)volumeList.get(idx);
|
||||
try {
|
||||
fsv.checkDirs();
|
||||
} catch (DiskErrorException e) {
|
||||
|
@ -882,8 +882,8 @@ class FSDataset implements FSDatasetInterface {
|
|||
|
||||
// Remove null volumes from the volumes array
|
||||
if (removedVols != null && removedVols.size() > 0) {
|
||||
List<FSVolume> newVols = new ArrayList<FSVolume>();
|
||||
for (FSVolume vol : volumeList) {
|
||||
List<FSVolumeInterface> newVols = new ArrayList<FSVolumeInterface>();
|
||||
for (FSVolumeInterface vol : volumeList) {
|
||||
if (vol != null) {
|
||||
newVols.add(vol);
|
||||
}
|
||||
|
@ -896,44 +896,30 @@ class FSDataset implements FSDatasetInterface {
|
|||
|
||||
return removedVols;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return volumes.toString();
|
||||
}
|
||||
|
||||
boolean isValid(FSVolume volume) {
|
||||
for (FSVolume vol : volumes) {
|
||||
if (vol == volume) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
private void addBlockPool(String bpid, Configuration conf)
|
||||
throws IOException {
|
||||
for (FSVolume v : volumes) {
|
||||
v.addBlockPool(bpid, conf);
|
||||
for (FSVolumeInterface v : volumes) {
|
||||
((FSVolume)v).addBlockPool(bpid, conf);
|
||||
}
|
||||
}
|
||||
|
||||
private void removeBlockPool(String bpid) {
|
||||
for (FSVolume v : volumes) {
|
||||
v.shutdownBlockPool(bpid);
|
||||
for (FSVolumeInterface v : volumes) {
|
||||
((FSVolume)v).shutdownBlockPool(bpid);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @return unmodifiable list of volumes
|
||||
*/
|
||||
public List<FSVolume> getVolumes() {
|
||||
return volumes;
|
||||
}
|
||||
|
||||
private void shutdown() {
|
||||
for (FSVolume volume : volumes) {
|
||||
for (FSVolumeInterface volume : volumes) {
|
||||
if(volume != null) {
|
||||
volume.shutdown();
|
||||
((FSVolume)volume).shutdown();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -945,35 +931,20 @@ class FSDataset implements FSDatasetInterface {
|
|||
//
|
||||
//////////////////////////////////////////////////////
|
||||
|
||||
//Find better place?
|
||||
static final String METADATA_EXTENSION = ".meta";
|
||||
static final String UNLINK_BLOCK_SUFFIX = ".unlinked";
|
||||
|
||||
private static boolean isUnlinkTmpFile(File f) {
|
||||
String name = f.getName();
|
||||
return name.endsWith(UNLINK_BLOCK_SUFFIX);
|
||||
}
|
||||
|
||||
static File getUnlinkTmpFile(File f) {
|
||||
return new File(f.getParentFile(), f.getName()+UNLINK_BLOCK_SUFFIX);
|
||||
return name.endsWith(DatanodeUtil.UNLINK_BLOCK_SUFFIX);
|
||||
}
|
||||
|
||||
private static File getOrigFile(File unlinkTmpFile) {
|
||||
String fileName = unlinkTmpFile.getName();
|
||||
return new File(unlinkTmpFile.getParentFile(),
|
||||
fileName.substring(0, fileName.length()-UNLINK_BLOCK_SUFFIX.length()));
|
||||
}
|
||||
|
||||
static String getMetaFileName(String blockFileName, long genStamp) {
|
||||
return blockFileName + "_" + genStamp + METADATA_EXTENSION;
|
||||
}
|
||||
|
||||
static File getMetaFile(File f , long genStamp) {
|
||||
return new File(getMetaFileName(f.getAbsolutePath(), genStamp));
|
||||
fileName.substring(0,
|
||||
fileName.length() - DatanodeUtil.UNLINK_BLOCK_SUFFIX.length()));
|
||||
}
|
||||
|
||||
protected File getMetaFile(ExtendedBlock b) throws IOException {
|
||||
return getMetaFile(getBlockFile(b), b.getGenerationStamp());
|
||||
return DatanodeUtil.getMetaFile(getBlockFile(b), b.getGenerationStamp());
|
||||
}
|
||||
|
||||
/** Find the metadata file for the specified block file.
|
||||
|
@ -995,34 +966,13 @@ class FSDataset implements FSDatasetInterface {
|
|||
" does not have a metafile!");
|
||||
return GenerationStamp.GRANDFATHER_GENERATION_STAMP;
|
||||
}
|
||||
|
||||
/** Find the corresponding meta data file from a given block file */
|
||||
private static File findMetaFile(final File blockFile) throws IOException {
|
||||
final String prefix = blockFile.getName() + "_";
|
||||
final File parent = blockFile.getParentFile();
|
||||
File[] matches = parent.listFiles(new FilenameFilter() {
|
||||
public boolean accept(File dir, String name) {
|
||||
return dir.equals(parent)
|
||||
&& name.startsWith(prefix) && name.endsWith(METADATA_EXTENSION);
|
||||
}
|
||||
});
|
||||
|
||||
if (matches == null || matches.length == 0) {
|
||||
throw new IOException("Meta file not found, blockFile=" + blockFile);
|
||||
}
|
||||
else if (matches.length > 1) {
|
||||
throw new IOException("Found more than one meta files: "
|
||||
+ Arrays.asList(matches));
|
||||
}
|
||||
return matches[0];
|
||||
}
|
||||
|
||||
/** Find the corresponding meta data file from a given block file */
|
||||
private static long parseGenerationStamp(File blockFile, File metaFile
|
||||
) throws IOException {
|
||||
String metaname = metaFile.getName();
|
||||
String gs = metaname.substring(blockFile.getName().length() + 1,
|
||||
metaname.length() - METADATA_EXTENSION.length());
|
||||
metaname.length() - DatanodeUtil.METADATA_EXTENSION.length());
|
||||
try {
|
||||
return Long.parseLong(gs);
|
||||
} catch(NumberFormatException nfe) {
|
||||
|
@ -1031,6 +981,11 @@ class FSDataset implements FSDatasetInterface {
|
|||
}
|
||||
}
|
||||
|
||||
@Override // FSDatasetInterface
|
||||
public List<FSVolumeInterface> getVolumes() {
|
||||
return volumes.volumes;
|
||||
}
|
||||
|
||||
@Override // FSDatasetInterface
|
||||
public synchronized Block getStoredBlock(String bpid, long blkid)
|
||||
throws IOException {
|
||||
|
@ -1038,7 +993,7 @@ class FSDataset implements FSDatasetInterface {
|
|||
if (blockfile == null) {
|
||||
return null;
|
||||
}
|
||||
File metafile = findMetaFile(blockfile);
|
||||
final File metafile = DatanodeUtil.findMetaFile(blockfile);
|
||||
return new Block(blkid, blockfile.length(),
|
||||
parseGenerationStamp(blockfile, metafile));
|
||||
}
|
||||
|
@ -1102,7 +1057,7 @@ class FSDataset implements FSDatasetInterface {
|
|||
/**
|
||||
* An FSDataset has a directory where it loads its data files.
|
||||
*/
|
||||
public FSDataset(DataNode datanode, DataStorage storage, Configuration conf)
|
||||
FSDataset(DataNode datanode, DataStorage storage, Configuration conf)
|
||||
throws IOException {
|
||||
this.datanode = datanode;
|
||||
this.maxBlocksPerDir =
|
||||
|
@ -1135,12 +1090,12 @@ class FSDataset implements FSDatasetInterface {
|
|||
+ ", volume failures tolerated: " + volFailuresTolerated);
|
||||
}
|
||||
|
||||
FSVolume[] volArray = new FSVolume[storage.getNumStorageDirs()];
|
||||
final List<FSVolumeInterface> volArray = new ArrayList<FSVolumeInterface>(
|
||||
storage.getNumStorageDirs());
|
||||
for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
|
||||
volArray[idx] = new FSVolume(storage.getStorageDir(idx).getCurrentDir(),
|
||||
conf);
|
||||
DataNode.LOG.info("FSDataset added volume - "
|
||||
+ storage.getStorageDir(idx).getCurrentDir());
|
||||
final File dir = storage.getStorageDir(idx).getCurrentDir();
|
||||
volArray.add(new FSVolume(dir, conf));
|
||||
DataNode.LOG.info("FSDataset added volume - " + dir);
|
||||
}
|
||||
volumeMap = new ReplicasMap(this);
|
||||
|
||||
|
@ -1186,7 +1141,7 @@ class FSDataset implements FSDatasetInterface {
|
|||
*/
|
||||
@Override // FSDatasetInterface
|
||||
public boolean hasEnoughResource() {
|
||||
return volumes.numberOfVolumes() >= validVolsRequired;
|
||||
return getVolumes().size() >= validVolsRequired;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1369,8 +1324,8 @@ class FSDataset implements FSDatasetInterface {
|
|||
private static File moveBlockFiles(Block b, File srcfile, File destdir
|
||||
) throws IOException {
|
||||
final File dstfile = new File(destdir, b.getBlockName());
|
||||
final File srcmeta = getMetaFile(srcfile, b.getGenerationStamp());
|
||||
final File dstmeta = getMetaFile(dstfile, b.getGenerationStamp());
|
||||
final File srcmeta = DatanodeUtil.getMetaFile(srcfile, b.getGenerationStamp());
|
||||
final File dstmeta = DatanodeUtil.getMetaFile(dstfile, b.getGenerationStamp());
|
||||
if (!srcmeta.renameTo(dstmeta)) {
|
||||
throw new IOException("Failed to move meta file for " + b
|
||||
+ " from " + srcmeta + " to " + dstmeta);
|
||||
|
@ -1488,7 +1443,7 @@ class FSDataset implements FSDatasetInterface {
|
|||
|
||||
// construct a RBW replica with the new GS
|
||||
File blkfile = replicaInfo.getBlockFile();
|
||||
FSVolume v = replicaInfo.getVolume();
|
||||
FSVolume v = (FSVolume)replicaInfo.getVolume();
|
||||
if (v.getAvailable() < estimateBlockLen - replicaInfo.getNumBytes()) {
|
||||
throw new DiskOutOfSpaceException("Insufficient space for appending to "
|
||||
+ replicaInfo);
|
||||
|
@ -1745,7 +1700,7 @@ class FSDataset implements FSDatasetInterface {
|
|||
+ visible + ", temp=" + temp);
|
||||
}
|
||||
// check volume
|
||||
final FSVolume v = temp.getVolume();
|
||||
final FSVolume v = (FSVolume)temp.getVolume();
|
||||
if (v == null) {
|
||||
throw new IOException("r.getVolume() = null, temp=" + temp);
|
||||
}
|
||||
|
@ -1806,7 +1761,7 @@ class FSDataset implements FSDatasetInterface {
|
|||
if ( vol == null ) {
|
||||
ReplicaInfo replica = volumeMap.get(bpid, blk);
|
||||
if (replica != null) {
|
||||
vol = volumeMap.get(bpid, blk).getVolume();
|
||||
vol = (FSVolume)volumeMap.get(bpid, blk).getVolume();
|
||||
}
|
||||
if ( vol == null ) {
|
||||
throw new IOException("Could not find volume for block " + blk);
|
||||
|
@ -1846,7 +1801,7 @@ class FSDataset implements FSDatasetInterface {
|
|||
newReplicaInfo = (FinalizedReplica)
|
||||
((ReplicaUnderRecovery)replicaInfo).getOriginalReplica();
|
||||
} else {
|
||||
FSVolume v = replicaInfo.getVolume();
|
||||
FSVolume v = (FSVolume)replicaInfo.getVolume();
|
||||
File f = replicaInfo.getBlockFile();
|
||||
if (v == null) {
|
||||
throw new IOException("No volume for temporary file " + f +
|
||||
|
@ -1944,7 +1899,8 @@ class FSDataset implements FSDatasetInterface {
|
|||
/**
|
||||
* Get the list of finalized blocks from in-memory blockmap for a block pool.
|
||||
*/
|
||||
synchronized List<Block> getFinalizedBlocks(String bpid) {
|
||||
@Override
|
||||
public synchronized List<Block> getFinalizedBlocks(String bpid) {
|
||||
ArrayList<Block> finalized = new ArrayList<Block>(volumeMap.size(bpid));
|
||||
for (ReplicaInfo b : volumeMap.replicas(bpid)) {
|
||||
if(b.getState() == ReplicaState.FINALIZED) {
|
||||
|
@ -2017,7 +1973,7 @@ class FSDataset implements FSDatasetInterface {
|
|||
}
|
||||
|
||||
//check replica's meta file
|
||||
final File metafile = getMetaFile(f, r.getGenerationStamp());
|
||||
final File metafile = DatanodeUtil.getMetaFile(f, r.getGenerationStamp());
|
||||
if (!metafile.exists()) {
|
||||
throw new IOException("Metafile " + metafile + " does not exist, r=" + r);
|
||||
}
|
||||
|
@ -2048,7 +2004,7 @@ class FSDataset implements FSDatasetInterface {
|
|||
error = true;
|
||||
continue;
|
||||
}
|
||||
v = dinfo.getVolume();
|
||||
v = (FSVolume)dinfo.getVolume();
|
||||
if (f == null) {
|
||||
DataNode.LOG.warn("Unexpected error trying to delete block "
|
||||
+ invalidBlks[i] +
|
||||
|
@ -2082,7 +2038,7 @@ class FSDataset implements FSDatasetInterface {
|
|||
}
|
||||
volumeMap.remove(bpid, invalidBlks[i]);
|
||||
}
|
||||
File metaFile = getMetaFile(f, invalidBlks[i].getGenerationStamp());
|
||||
File metaFile = DatanodeUtil.getMetaFile(f, invalidBlks[i].getGenerationStamp());
|
||||
|
||||
// Delete the block asynchronously to make sure we can do it fast enough
|
||||
asyncDiskService.deleteAsync(v, bpid, f, metaFile,
|
||||
|
@ -2235,8 +2191,9 @@ class FSDataset implements FSDatasetInterface {
|
|||
* @param diskMetaFile Metadata file from on the disk
|
||||
* @param vol Volume of the block file
|
||||
*/
|
||||
@Override
|
||||
public void checkAndUpdate(String bpid, long blockId, File diskFile,
|
||||
File diskMetaFile, FSVolume vol) {
|
||||
File diskMetaFile, FSVolumeInterface vol) {
|
||||
Block corruptBlock = null;
|
||||
ReplicaInfo memBlockInfo;
|
||||
synchronized (this) {
|
||||
|
@ -2324,7 +2281,7 @@ class FSDataset implements FSDatasetInterface {
|
|||
|
||||
// Compare generation stamp
|
||||
if (memBlockInfo.getGenerationStamp() != diskGS) {
|
||||
File memMetaFile = getMetaFile(diskFile,
|
||||
File memMetaFile = DatanodeUtil.getMetaFile(diskFile,
|
||||
memBlockInfo.getGenerationStamp());
|
||||
if (memMetaFile.exists()) {
|
||||
if (memMetaFile.compareTo(diskMetaFile) != 0) {
|
||||
|
@ -2559,18 +2516,15 @@ class FSDataset implements FSDatasetInterface {
|
|||
volumes.removeBlockPool(bpid);
|
||||
}
|
||||
|
||||
/**
|
||||
* get list of all bpids
|
||||
* @return list of bpids
|
||||
*/
|
||||
public String [] getBPIdlist() throws IOException {
|
||||
@Override
|
||||
public String[] getBlockPoolList() {
|
||||
return volumeMap.getBlockPoolList();
|
||||
}
|
||||
|
||||
/**
|
||||
* Class for representing the Datanode volume information
|
||||
*/
|
||||
static class VolumeInfo {
|
||||
private static class VolumeInfo {
|
||||
final String directory;
|
||||
final long usedSpace;
|
||||
final long freeSpace;
|
||||
|
@ -2583,10 +2537,11 @@ class FSDataset implements FSDatasetInterface {
|
|||
this.reservedSpace = reservedSpace;
|
||||
}
|
||||
}
|
||||
|
||||
Collection<VolumeInfo> getVolumeInfo() {
|
||||
|
||||
private Collection<VolumeInfo> getVolumeInfo() {
|
||||
Collection<VolumeInfo> info = new ArrayList<VolumeInfo>();
|
||||
for (FSVolume volume : volumes.volumes) {
|
||||
for (FSVolumeInterface v : volumes.volumes) {
|
||||
final FSVolume volume = (FSVolume)v;
|
||||
long used = 0;
|
||||
long free = 0;
|
||||
try {
|
||||
|
@ -2603,13 +2558,27 @@ class FSDataset implements FSDatasetInterface {
|
|||
}
|
||||
return info;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public Map<String, Object> getVolumeInfoMap() {
|
||||
final Map<String, Object> info = new HashMap<String, Object>();
|
||||
Collection<VolumeInfo> volumes = getVolumeInfo();
|
||||
for (VolumeInfo v : volumes) {
|
||||
final Map<String, Object> innerInfo = new HashMap<String, Object>();
|
||||
innerInfo.put("usedSpace", v.usedSpace);
|
||||
innerInfo.put("freeSpace", v.freeSpace);
|
||||
innerInfo.put("reservedSpace", v.reservedSpace);
|
||||
info.put(v.directory, innerInfo);
|
||||
}
|
||||
return info;
|
||||
}
|
||||
|
||||
@Override //FSDatasetInterface
|
||||
public synchronized void deleteBlockPool(String bpid, boolean force)
|
||||
throws IOException {
|
||||
if (!force) {
|
||||
for (FSVolume volume : volumes.volumes) {
|
||||
if (!volume.isBPDirEmpty(bpid)) {
|
||||
for (FSVolumeInterface volume : volumes.volumes) {
|
||||
if (!((FSVolume)volume).isBPDirEmpty(bpid)) {
|
||||
DataNode.LOG.warn(bpid
|
||||
+ " has some block files, cannot delete unless forced");
|
||||
throw new IOException("Cannot delete block pool, "
|
||||
|
@ -2617,8 +2586,8 @@ class FSDataset implements FSDatasetInterface {
|
|||
}
|
||||
}
|
||||
}
|
||||
for (FSVolume volume : volumes.volumes) {
|
||||
volume.deleteBPDirectories(bpid, force);
|
||||
for (FSVolumeInterface volume : volumes.volumes) {
|
||||
((FSVolume)volume).deleteBPDirectories(bpid, force);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2626,7 +2595,7 @@ class FSDataset implements FSDatasetInterface {
|
|||
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block)
|
||||
throws IOException {
|
||||
File datafile = getBlockFile(block);
|
||||
File metafile = getMetaFile(datafile, block.getGenerationStamp());
|
||||
File metafile = DatanodeUtil.getMetaFile(datafile, block.getGenerationStamp());
|
||||
BlockLocalPathInfo info = new BlockLocalPathInfo(block,
|
||||
datafile.getAbsolutePath(), metafile.getAbsolutePath());
|
||||
return info;
|
||||
|
|
|
@@ -19,10 +19,13 @@ package org.apache.hadoop.hdfs.server.datanode;

import java.io.Closeable;
import java.io.File;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;

@@ -46,8 +49,44 @@ import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 */
@InterfaceAudience.Private
public interface FSDatasetInterface extends FSDatasetMBean {

  /**
   * This is an interface for the underlying volume.
   * @see org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume
   */
  interface FSVolumeInterface {
    /** @return a list of block pools. */
    public String[] getBlockPoolList();

    /** @return the available storage space in bytes. */
    public long getAvailable() throws IOException;

    /** @return the directory for the block pool. */
    public File getDirectory(String bpid) throws IOException;

    /** @return the directory for the finalized blocks in the block pool. */
    public File getFinalizedDir(String bpid) throws IOException;
  }

  /** @return a list of volumes. */
  public List<FSVolumeInterface> getVolumes();

  /** @return a volume information map (name => info). */
  public Map<String, Object> getVolumeInfoMap();

  /** @return a list of block pools. */
  public String[] getBlockPoolList();

  /** @return a list of finalized blocks for the given block pool. */
  public List<Block> getFinalizedBlocks(String bpid);

  /**
   * Check whether the in-memory block record matches the block on the disk,
   * and, in case that they are not matched, update the record or mark it
   * as corrupted.
   */
  public void checkAndUpdate(String bpid, long blockId, File diskFile,
      File diskMetaFile, FSVolumeInterface vol);

  /**
   * Returns the length of the metadata file of the specified block
   * @param b - the block for which the metadata length is desired
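The four volume methods above are the entire surface a volume exposes outside FSDataset. As a rough illustration of how small that contract is, a hypothetical single-directory stub (for example for tests; not part of this patch, and the per-block-pool directory layout is an assumption) could implement it directly:

// Hypothetical stub, illustration only; the directory layout used here is
// assumed, not taken from this patch.
package org.apache.hadoop.hdfs.server.datanode;

import java.io.File;

class SingleDirVolume implements FSDatasetInterface.FSVolumeInterface {
  private final File root;

  SingleDirVolume(File root) {
    this.root = root;
  }

  @Override
  public String[] getBlockPoolList() {
    final String[] pools = root.list();   // assume one subdirectory per block pool
    return pools == null ? new String[0] : pools;
  }

  @Override
  public long getAvailable() {
    return root.getUsableSpace();          // free space on the backing disk
  }

  @Override
  public File getDirectory(String bpid) {
    return new File(root, bpid);
  }

  @Override
  public File getFinalizedDir(String bpid) {
    return new File(getDirectory(bpid), "current/finalized");
  }
}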
@@ -21,7 +21,7 @@ import java.io.File;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;

/**
 * This class describes a replica that has been finalized.

@@ -38,7 +38,7 @@ class FinalizedReplica extends ReplicaInfo {
   * @param dir directory path where block and meta files are located
   */
  FinalizedReplica(long blockId, long len, long genStamp,
      FSVolume vol, File dir) {
      FSVolumeInterface vol, File dir) {
    super(blockId, len, genStamp, vol, dir);
  }

@@ -48,7 +48,7 @@ class FinalizedReplica extends ReplicaInfo {
   * @param vol volume where replica is located
   * @param dir directory path where block and meta files are located
   */
  FinalizedReplica(Block block, FSVolume vol, File dir) {
  FinalizedReplica(Block block, FSVolumeInterface vol, File dir) {
    super(block, vol, dir);
  }
@@ -21,7 +21,7 @@ import java.io.File;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;

/** This class represents replicas being written.
 * Those are the replicas that

@@ -36,7 +36,7 @@ class ReplicaBeingWritten extends ReplicaInPipeline {
   * @param dir directory path where block and meta files are located
   */
  ReplicaBeingWritten(long blockId, long genStamp,
        FSVolume vol, File dir) {
        FSVolumeInterface vol, File dir) {
    super( blockId, genStamp, vol, dir);
  }

@@ -48,7 +48,7 @@ class ReplicaBeingWritten extends ReplicaInPipeline {
   * @param writer a thread that is writing to this replica
   */
  ReplicaBeingWritten(Block block,
      FSVolume vol, File dir, Thread writer) {
      FSVolumeInterface vol, File dir, Thread writer) {
    super( block, vol, dir, writer);
  }

@@ -62,7 +62,7 @@ class ReplicaBeingWritten extends ReplicaInPipeline {
   * @param writer a thread that is writing to this replica
   */
  ReplicaBeingWritten(long blockId, long len, long genStamp,
      FSVolume vol, File dir, Thread writer ) {
      FSVolumeInterface vol, File dir, Thread writer ) {
    super( blockId, len, genStamp, vol, dir, writer);
  }
@@ -24,8 +24,8 @@ import java.io.RandomAccessFile;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.DataChecksum;

@@ -53,7 +53,7 @@ class ReplicaInPipeline extends ReplicaInfo
   * @param state replica state
   */
  ReplicaInPipeline(long blockId, long genStamp,
        FSVolume vol, File dir) {
        FSVolumeInterface vol, File dir) {
    this( blockId, 0L, genStamp, vol, dir, Thread.currentThread());
  }

@@ -65,7 +65,7 @@ class ReplicaInPipeline extends ReplicaInfo
   * @param writer a thread that is writing to this replica
   */
  ReplicaInPipeline(Block block,
      FSVolume vol, File dir, Thread writer) {
      FSVolumeInterface vol, File dir, Thread writer) {
    this( block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(),
        vol, dir, writer);
  }

@@ -80,7 +80,7 @@ class ReplicaInPipeline extends ReplicaInfo
   * @param writer a thread that is writing to this replica
   */
  ReplicaInPipeline(long blockId, long len, long genStamp,
      FSVolume vol, File dir, Thread writer ) {
      FSVolumeInterface vol, File dir, Thread writer ) {
    super( blockId, len, genStamp, vol, dir);
    this.bytesAcked = len;
    this.bytesOnDisk = len;
@@ -26,7 +26,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.HardLink;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.io.IOUtils;

/**

@@ -35,8 +35,10 @@ import org.apache.hadoop.io.IOUtils;
 */
@InterfaceAudience.Private
abstract public class ReplicaInfo extends Block implements Replica {
  private FSVolume volume; // volume where the replica belongs
  private File dir; // directory where block & meta files belong
  /** volume where the replica belongs */
  private FSVolumeInterface volume;
  /** directory where block & meta files belong */
  private File dir;

  /**
   * Constructor for a zero length replica

@@ -45,7 +47,7 @@ abstract public class ReplicaInfo extends Block implements Replica {
   * @param vol volume where replica is located
   * @param dir directory path where block and meta files are located
   */
  ReplicaInfo(long blockId, long genStamp, FSVolume vol, File dir) {
  ReplicaInfo(long blockId, long genStamp, FSVolumeInterface vol, File dir) {
    this( blockId, 0L, genStamp, vol, dir);
  }

@@ -55,7 +57,7 @@ abstract public class ReplicaInfo extends Block implements Replica {
   * @param vol volume where replica is located
   * @param dir directory path where block and meta files are located
   */
  ReplicaInfo(Block block, FSVolume vol, File dir) {
  ReplicaInfo(Block block, FSVolumeInterface vol, File dir) {
    this(block.getBlockId(), block.getNumBytes(),
        block.getGenerationStamp(), vol, dir);
  }

@@ -69,7 +71,7 @@ abstract public class ReplicaInfo extends Block implements Replica {
   * @param dir directory path where block and meta files are located
   */
  ReplicaInfo(long blockId, long len, long genStamp,
      FSVolume vol, File dir) {
      FSVolumeInterface vol, File dir) {
    super(blockId, len, genStamp);
    this.volume = vol;
    this.dir = dir;

@@ -111,14 +113,14 @@ abstract public class ReplicaInfo extends Block implements Replica {
   * Get the volume where this replica is located on disk
   * @return the volume where this replica is located on disk
   */
  FSVolume getVolume() {
  FSVolumeInterface getVolume() {
    return volume;
  }

  /**
   * Set the volume where this replica is located on disk
   */
  void setVolume(FSVolume vol) {
  void setVolume(FSVolumeInterface vol) {
    this.volume = vol;
  }

@@ -162,7 +164,7 @@ abstract public class ReplicaInfo extends Block implements Replica {
   * be recovered (especially on Windows) on datanode restart.
   */
  private void unlinkFile(File file, Block b) throws IOException {
    File tmpFile = DatanodeUtil.createTmpFile(b, FSDataset.getUnlinkTmpFile(file));
    File tmpFile = DatanodeUtil.createTmpFile(b, DatanodeUtil.getUnlinkTmpFile(file));
    try {
      FileInputStream in = new FileInputStream(file);
      try {
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode;
import java.io.File;

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;

/**

@@ -145,7 +145,7 @@ class ReplicaUnderRecovery extends ReplicaInfo {
  }

  @Override //ReplicaInfo
  void setVolume(FSVolume vol) {
  void setVolume(FSVolumeInterface vol) {
    super.setVolume(vol);
    original.setVolume(vol);
  }
@@ -21,7 +21,7 @@ import java.io.File;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;

/**
 * This class represents a replica that is waiting to be recovered.

@@ -44,7 +44,7 @@ class ReplicaWaitingToBeRecovered extends ReplicaInfo {
   * @param dir directory path where block and meta files are located
   */
  ReplicaWaitingToBeRecovered(long blockId, long len, long genStamp,
      FSVolume vol, File dir) {
      FSVolumeInterface vol, File dir) {
    super(blockId, len, genStamp, vol, dir);
  }

@@ -54,7 +54,7 @@ class ReplicaWaitingToBeRecovered extends ReplicaInfo {
   * @param vol volume where replica is located
   * @param dir directory path where block and meta files are located
   */
  ReplicaWaitingToBeRecovered(Block block, FSVolume vol, File dir) {
  ReplicaWaitingToBeRecovered(Block block, FSVolumeInterface vol, File dir) {
    super(block, vol, dir);
  }
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode;
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;

public class RoundRobinVolumesPolicy implements BlockVolumeChoosingPolicy {

@@ -28,8 +28,8 @@ public class RoundRobinVolumesPolicy implements BlockVolumeChoosingPolicy {
  private int curVolume = 0;

  @Override
  public synchronized FSVolume chooseVolume(List<FSVolume> volumes, long blockSize)
      throws IOException {
  public synchronized FSVolumeInterface chooseVolume(
      List<FSVolumeInterface> volumes, long blockSize) throws IOException {
    if(volumes.size() < 1) {
      throw new DiskOutOfSpaceException("No more available volumes");
    }

@@ -44,7 +44,7 @@ public class RoundRobinVolumesPolicy implements BlockVolumeChoosingPolicy {
    long maxAvailable = 0;

    while (true) {
      FSVolume volume = volumes.get(curVolume);
      FSVolumeInterface volume = volumes.get(curVolume);
      curVolume = (curVolume + 1) % volumes.size();
      long availableVolumeSize = volume.getAvailable();
      if (availableVolumeSize > blockSize) { return volume; }
|
||||
|
|
|
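The hunks above show only fragments of the updated chooseVolume(..). As a rough, self-contained sketch of the round-robin selection it performs — VolumeLike and RoundRobinChooser are hypothetical stand-ins, not the real FSVolumeInterface API — the loop can be written as:

import java.io.IOException;
import java.util.List;

// Hypothetical, simplified stand-in for the volume abstraction: only the one
// method the round-robin policy needs.
interface VolumeLike {
  long getAvailable() throws IOException;
}

class RoundRobinChooser {
  private int curVolume = 0;

  // Walk the volume list at most once, starting where the previous call left
  // off; return the first volume with room for the block, otherwise fail with
  // the largest free space seen.
  synchronized VolumeLike chooseVolume(List<VolumeLike> volumes, long blockSize)
      throws IOException {
    if (volumes.isEmpty()) {
      throw new IOException("No more available volumes");
    }
    final int startVolume = curVolume;
    long maxAvailable = 0;
    while (true) {
      VolumeLike volume = volumes.get(curVolume);
      curVolume = (curVolume + 1) % volumes.size();
      long available = volume.getAvailable();
      if (available > blockSize) {
        return volume;
      }
      maxAvailable = Math.max(maxAvailable, available);
      if (curVolume == startVolume) {
        throw new IOException("Out of space: the largest free space among the "
            + volumes.size() + " volumes is " + maxAvailable
            + " B, but the block size is " + blockSize + " B");
      }
    }
  }
}

The real policy reports this condition as a DiskOutOfSpaceException; a plain IOException is used here only to keep the sketch dependency-free.
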
@@ -17,12 +17,14 @@
*/
package org.apache.hadoop.hdfs.server.datanode;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;

@@ -38,11 +40,10 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.BlockPoolSlice;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolumeSet;
import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;

@@ -988,8 +989,33 @@ public class SimulatedFSDataset implements FSDatasetInterface, Configurable{
}

@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b)
throws IOException {
throw new IOException("getBlockLocalPathInfo not supported.");
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b) {
throw new UnsupportedOperationException();
}

@Override
public String[] getBlockPoolList() {
throw new UnsupportedOperationException();
}

@Override
public void checkAndUpdate(String bpid, long blockId, File diskFile,
File diskMetaFile, FSVolumeInterface vol) {
throw new UnsupportedOperationException();
}

@Override
public List<FSVolumeInterface> getVolumes() {
throw new UnsupportedOperationException();
}

@Override
public List<Block> getFinalizedBlocks(String bpid) {
throw new UnsupportedOperationException();
}

@Override
public Map<String, Object> getVolumeInfoMap() {
throw new UnsupportedOperationException();
}
}

@@ -23,7 +23,7 @@ import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame;

import java.io.IOException;
import java.util.Collection;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

@@ -31,7 +31,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.VolumeInfo;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.junit.Assert;

@@ -81,11 +80,11 @@ public class TestDataNodeMultipleRegistrations {

// check number of volumes in fsdataset
DataNode dn = cluster.getDataNodes().get(0);
Collection<VolumeInfo> volInfos = ((FSDataset) dn.data).getVolumeInfo();
assertNotNull("No volumes in the fsdataset", volInfos);
final Map<String, Object> volInfos = dn.data.getVolumeInfoMap();
Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
int i = 0;
for (VolumeInfo vi : volInfos) {
LOG.info("vol " + i++ + ";dir=" + vi.directory + ";fs= " + vi.freeSpace);
for (Map.Entry<String, Object> e : volInfos.entrySet()) {
LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
}
// number of volumes should be 2 - [data1, data2]
assertEquals("number of volumes is wrong", 2, volInfos.size());

@@ -143,11 +142,11 @@ public class TestDataNodeMultipleRegistrations {

// check number of vlumes in fsdataset
DataNode dn = cluster.getDataNodes().get(0);
Collection<VolumeInfo> volInfos = ((FSDataset) dn.data).getVolumeInfo();
assertNotNull("No volumes in the fsdataset", volInfos);
final Map<String, Object> volInfos = dn.data.getVolumeInfoMap();
Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
int i = 0;
for (VolumeInfo vi : volInfos) {
LOG.info("vol " + i++ + ";dir=" + vi.directory + ";fs= " + vi.freeSpace);
for (Map.Entry<String, Object> e : volInfos.entrySet()) {
LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
}
// number of volumes should be 2 - [data1, data2]
assertEquals("number of volumes is wrong", 2, volInfos.size());

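The test above switches from the FSDataset-specific Collection<VolumeInfo> to the interface-level Map<String, Object> returned by getVolumeInfoMap(). A minimal sketch of consuming such a map — the directory keys and stats values shown here are made up for illustration, not the real map contents:

import java.util.LinkedHashMap;
import java.util.Map;

public class VolumeInfoMapExample {
  public static void main(String[] args) {
    // Hypothetical shape: volume directory -> per-volume stats rendered as a string.
    Map<String, Object> volInfos = new LinkedHashMap<String, Object>();
    volInfos.put("/data1/current", "{freeSpace=1048576, reservedSpace=0}");
    volInfos.put("/data2/current", "{freeSpace=2097152, reservedSpace=0}");

    int i = 0;
    for (Map.Entry<String, Object> e : volInfos.entrySet()) {
      System.out.println("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
    }
    // The test then only needs the map size to check how many volumes are configured.
    if (volInfos.size() != 2) {
      throw new AssertionError("number of volumes is wrong");
    }
  }
}
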
@@ -17,6 +17,9 @@
*/
package org.apache.hadoop.hdfs.server.datanode;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;

import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;

@@ -29,8 +32,8 @@ import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.BlockReaderFactory;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

@@ -42,13 +45,10 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.net.NetUtils;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;

/**
* Fine-grain testing of block files and locations after volume failure.

@@ -272,8 +272,7 @@ public class TestDataNodeVolumeFailure {
String file = BlockReaderFactory.getFileName(targetAddr,
"test-blockpoolid",
block.getBlockId());
BlockReader blockReader =
BlockReaderFactory.newBlockReader(conf, s, file, block, lblock
BlockReaderFactory.newBlockReader(conf, s, file, block, lblock
.getBlockToken(), 0, -1);

// nothing - if it fails - it will throw and exception

@@ -370,7 +369,7 @@ public class TestDataNodeVolumeFailure {
new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.startsWith("blk_") &&
name.endsWith(FSDataset.METADATA_EXTENSION);
name.endsWith(DatanodeUtil.METADATA_EXTENSION);
}
}
);

@@ -30,17 +30,17 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

import org.junit.Test;
import org.junit.Assert;
import org.junit.Test;

/** Test if a datanode can correctly upgrade itself */
public class TestDatanodeRestart {

@@ -98,8 +98,9 @@ public class TestDatanodeRestart {
out.write(writeBuf);
out.hflush();
DataNode dn = cluster.getDataNodes().get(0);
for (FSVolume volume : ((FSDataset)dn.data).volumes.getVolumes()) {
File currentDir = volume.getDir().getParentFile();
for (FSVolumeInterface v : dn.data.getVolumes()) {
FSVolume volume = (FSVolume)v;
File currentDir = volume.getCurrentDir().getParentFile().getParentFile();
File rbwDir = new File(currentDir, "rbw");
for (File file : rbwDir.listFiles()) {
if (isCorrupt && Block.isBlockFilename(file)) {

@@ -188,7 +189,7 @@ public class TestDatanodeRestart {
} else {
src = replicaInfo.getMetaFile();
}
File dst = FSDataset.getUnlinkTmpFile(src);
File dst = DatanodeUtil.getUnlinkTmpFile(src);
if (isRename) {
src.renameTo(dst);
} else {

@@ -25,20 +25,20 @@ import java.util.LinkedList;
import java.util.List;
import java.util.Random;

import junit.framework.TestCase;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;

import junit.framework.TestCase;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;

/**
* Tests {@link DirectoryScanner} handling of differences

@@ -142,10 +142,10 @@ public class TestDirectoryScanner extends TestCase {

/** Create a block file in a random volume*/
private long createBlockFile() throws IOException {
List<FSVolume> volumes = fds.volumes.getVolumes();
List<FSVolumeInterface> volumes = fds.getVolumes();
int index = rand.nextInt(volumes.size() - 1);
long id = getFreeBlockId();
File finalizedDir = volumes.get(index).getBlockPoolSlice(bpid).getFinalizedDir();
File finalizedDir = volumes.get(index).getFinalizedDir(bpid);
File file = new File(finalizedDir, getBlockFile(id));
if (file.createNewFile()) {
LOG.info("Created block file " + file.getName());

@@ -155,10 +155,10 @@ public class TestDirectoryScanner extends TestCase {

/** Create a metafile in a random volume*/
private long createMetaFile() throws IOException {
List<FSVolume> volumes = fds.volumes.getVolumes();
List<FSVolumeInterface> volumes = fds.getVolumes();
int index = rand.nextInt(volumes.size() - 1);
long id = getFreeBlockId();
File finalizedDir = volumes.get(index).getBlockPoolSlice(bpid).getFinalizedDir();
File finalizedDir = volumes.get(index).getFinalizedDir(bpid);
File file = new File(finalizedDir, getMetaFile(id));
if (file.createNewFile()) {
LOG.info("Created metafile " + file.getName());

@@ -168,10 +168,10 @@ public class TestDirectoryScanner extends TestCase {

/** Create block file and corresponding metafile in a rondom volume */
private long createBlockMetaFile() throws IOException {
List<FSVolume> volumes = fds.volumes.getVolumes();
List<FSVolumeInterface> volumes = fds.getVolumes();
int index = rand.nextInt(volumes.size() - 1);
long id = getFreeBlockId();
File finalizedDir = volumes.get(index).getBlockPoolSlice(bpid).getFinalizedDir();
File finalizedDir = volumes.get(index).getFinalizedDir(bpid);
File file = new File(finalizedDir, getBlockFile(id));
if (file.createNewFile()) {
LOG.info("Created block file " + file.getName());

@@ -21,10 +21,10 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import junit.framework.Assert;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;

@@ -33,14 +33,14 @@ public class TestRoundRobinVolumesPolicy {
// Test the Round-Robin block-volume choosing algorithm.
@Test
public void testRR() throws Exception {
final List<FSVolume> volumes = new ArrayList<FSVolume>();
final List<FSVolumeInterface> volumes = new ArrayList<FSVolumeInterface>();

// First volume, with 100 bytes of space.
volumes.add(Mockito.mock(FSVolume.class));
volumes.add(Mockito.mock(FSVolumeInterface.class));
Mockito.when(volumes.get(0).getAvailable()).thenReturn(100L);

// Second volume, with 200 bytes of space.
volumes.add(Mockito.mock(FSVolume.class));
volumes.add(Mockito.mock(FSVolumeInterface.class));
Mockito.when(volumes.get(1).getAvailable()).thenReturn(200L);

RoundRobinVolumesPolicy policy = ReflectionUtils.newInstance(

@@ -69,14 +69,14 @@ public class TestRoundRobinVolumesPolicy {
@Test
public void testRRPolicyExceptionMessage()
throws Exception {
final List<FSVolume> volumes = new ArrayList<FSVolume>();
final List<FSVolumeInterface> volumes = new ArrayList<FSVolumeInterface>();

// First volume, with 500 bytes of space.
volumes.add(Mockito.mock(FSVolume.class));
volumes.add(Mockito.mock(FSVolumeInterface.class));
Mockito.when(volumes.get(0).getAvailable()).thenReturn(500L);

// Second volume, with 600 bytes of space.
volumes.add(Mockito.mock(FSVolume.class));
volumes.add(Mockito.mock(FSVolumeInterface.class));
Mockito.when(volumes.get(1).getAvailable()).thenReturn(600L);

RoundRobinVolumesPolicy policy = new RoundRobinVolumesPolicy();

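One effect of the new chooseVolume(..) signature is visible in this test: the policy can now be exercised against Mockito mocks of an interface instead of the concrete FSVolume class. A stand-alone sketch of that pattern, assuming Mockito is on the classpath — VolumeLike is a simplified stand-in, not the real FSVolumeInterface:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.mockito.Mockito;

public class MockVolumesExample {
  // Simplified stand-in for the volume abstraction used by the choosing policy.
  interface VolumeLike {
    long getAvailable() throws IOException;
  }

  public static void main(String[] args) throws IOException {
    final List<VolumeLike> volumes = new ArrayList<VolumeLike>();

    // First volume, with 100 bytes of space.
    volumes.add(Mockito.mock(VolumeLike.class));
    Mockito.when(volumes.get(0).getAvailable()).thenReturn(100L);

    // Second volume, with 200 bytes of space.
    volumes.add(Mockito.mock(VolumeLike.class));
    Mockito.when(volumes.get(1).getAvailable()).thenReturn(200L);

    // The mocks behave like fixed-capacity volumes without touching any real disk.
    System.out.println(volumes.get(0).getAvailable()); // 100
    System.out.println(volumes.get(1).getAvailable()); // 200
  }
}
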
@@ -140,7 +140,7 @@ public class TestWriteToReplica {
ReplicasMap replicasMap = dataSet.volumeMap;
FSVolume vol = dataSet.volumes.getNextVolume(0);
ReplicaInfo replicaInfo = new FinalizedReplica(
blocks[FINALIZED].getLocalBlock(), vol, vol.getDir());
blocks[FINALIZED].getLocalBlock(), vol, vol.getCurrentDir().getParentFile());
replicasMap.add(bpid, replicaInfo);
replicaInfo.getBlockFile().createNewFile();
replicaInfo.getMetaFile().createNewFile();

@@ -160,15 +160,15 @@ public class TestWriteToReplica {
blocks[RWR].getLocalBlock(), vol, vol.createRbwFile(bpid,
blocks[RWR].getLocalBlock()).getParentFile()));
replicasMap.add(bpid, new ReplicaUnderRecovery(new FinalizedReplica(blocks[RUR]
.getLocalBlock(), vol, vol.getDir()), 2007));
.getLocalBlock(), vol, vol.getCurrentDir().getParentFile()), 2007));

return blocks;
}

private void testAppend(String bpid, FSDataset dataSet, ExtendedBlock[] blocks) throws IOException {
long newGS = blocks[FINALIZED].getGenerationStamp()+1;
FSVolume v = dataSet.volumeMap.get(bpid, blocks[FINALIZED].getLocalBlock())
.getVolume();
final FSVolume v = (FSVolume)dataSet.volumeMap.get(
bpid, blocks[FINALIZED].getLocalBlock()).getVolume();
long available = v.getCapacity()-v.getDfsUsed();
long expectedLen = blocks[FINALIZED].getNumBytes();
try {