diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c116c5b19ea..c8f1af21ff4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -201,8 +201,15 @@ Trunk (unreleased changes)
Release 0.23.2 - UNRELEASED
- NEW FEATURES
-
+ INCOMPATIBLE CHANGES
+
+ HDFS-2887. FSVolume, which is part of the FSDatasetInterface implementation,
+ should not be referenced outside FSDataset. A new FSVolumeInterface is
+ defined. The BlockVolumeChoosingPolicy.chooseVolume(..) method signature is
+ also updated. (szetszwo)
+
+ NEW FEATURES
+
IMPROVEMENTS
OPTIMIZATIONS
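
The chooseVolume(..) signature change is the incompatible part: a custom BlockVolumeChoosingPolicy must now be written against FSVolumeInterface instead of FSDataset.FSVolume. A minimal sketch of such a policy under the new signature follows; it is illustrative only, not part of this patch, and the class name AvailableSpaceFirstPolicy is made up.

// Illustrative sketch, not part of this patch: a custom BlockVolumeChoosingPolicy
// compiled against the new chooseVolume(List<FSVolumeInterface>, long) signature.
package org.apache.hadoop.hdfs.server.datanode;

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;

public class AvailableSpaceFirstPolicy implements BlockVolumeChoosingPolicy {
  @Override
  public synchronized FSVolumeInterface chooseVolume(
      List<FSVolumeInterface> volumes, long blockSize) throws IOException {
    FSVolumeInterface best = null;
    long bestAvailable = -1L;
    for (FSVolumeInterface v : volumes) {
      final long available = v.getAvailable(); // method on the new interface
      if (available > blockSize && available > bestAvailable) {
        best = v;
        bestAvailable = available;
      }
    }
    if (best == null) {
      throw new DiskOutOfSpaceException("Insufficient space for an additional block");
    }
    return best;
  }
}
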
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
index 0a42d0e951b..e3709463b41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
@@ -46,15 +46,14 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.IOUtils;
/**
* Performs two types of scanning:
*
* <li> Gets block files from the data directories and reconciles the
- * difference between the blocks on the disk and in memory in
- * {@link FSDataset}</li>
+ * difference between the blocks on the disk and in memory.</li>
* <li> Scans the data directories for block files under a block pool
* and verifies that the files are not corrupt</li>
* This keeps track of blocks and their last verification times.
@@ -78,7 +77,7 @@ class BlockPoolSliceScanner {
private long scanPeriod = DEFAULT_SCAN_PERIOD_HOURS * 3600 * 1000;
private DataNode datanode;
- private FSDataset dataset;
+ private final FSDatasetInterface dataset;
// sorted set
private TreeSet<BlockScanInfo> blockInfoSet;
@@ -137,8 +136,8 @@ class BlockPoolSliceScanner {
}
}
- BlockPoolSliceScanner(DataNode datanode, FSDataset dataset, Configuration conf,
- String bpid) {
+ BlockPoolSliceScanner(DataNode datanode, FSDatasetInterface dataset,
+ Configuration conf, String bpid) {
this.datanode = datanode;
this.dataset = dataset;
this.blockPoolId = bpid;
@@ -220,16 +219,16 @@ class BlockPoolSliceScanner {
* otherwise, pick the first directory.
*/
File dir = null;
- List<FSVolume> volumes = dataset.volumes.getVolumes();
- for (FSDataset.FSVolume vol : dataset.volumes.getVolumes()) {
- File bpDir = vol.getBlockPoolSlice(blockPoolId).getDirectory();
+ List<FSVolumeInterface> volumes = dataset.getVolumes();
+ for (FSVolumeInterface vol : volumes) {
+ File bpDir = vol.getDirectory(blockPoolId);
if (LogFileHandler.isFilePresent(bpDir, verificationLogFile)) {
dir = bpDir;
break;
}
}
if (dir == null) {
- dir = volumes.get(0).getBlockPoolSlice(blockPoolId).getDirectory();
+ dir = volumes.get(0).getDirectory(blockPoolId);
}
try {
@@ -577,8 +576,8 @@ class BlockPoolSliceScanner {
bytesLeft += len;
}
- static File getCurrentFile(FSVolume vol, String bpid) throws IOException {
- return LogFileHandler.getCurrentFile(vol.getBlockPoolSlice(bpid).getDirectory(),
+ static File getCurrentFile(FSVolumeInterface vol, String bpid) throws IOException {
+ return LogFileHandler.getCurrentFile(vol.getDirectory(bpid),
BlockPoolSliceScanner.verificationLogFile);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java
index dc4a3d94430..49e0f464d91 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java
@@ -22,7 +22,7 @@ import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
/**************************************************
* BlockVolumeChoosingPolicy allows a DataNode to
@@ -46,7 +46,7 @@ public interface BlockVolumeChoosingPolicy {
* @return the chosen volume to store the block.
* @throws IOException when disks are unavailable or are full.
*/
- public FSVolume chooseVolume(List<FSVolume> volumes, long blockSize)
+ public FSVolumeInterface chooseVolume(List<FSVolumeInterface> volumes, long blockSize)
throws IOException;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
index e14aaf63c7c..c0d0bff23c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
@@ -27,12 +27,12 @@ import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
/**
* DataBlockScanner manages block scanning for all the block pools. For each
@@ -44,7 +44,7 @@ import org.apache.commons.logging.LogFactory;
public class DataBlockScanner implements Runnable {
public static final Log LOG = LogFactory.getLog(DataBlockScanner.class);
private final DataNode datanode;
- private final FSDataset dataset;
+ private final FSDatasetInterface dataset;
private final Configuration conf;
/**
@@ -55,7 +55,7 @@ public class DataBlockScanner implements Runnable {
new TreeMap<String, BlockPoolSliceScanner>();
Thread blockScannerThread = null;
- DataBlockScanner(DataNode datanode, FSDataset dataset, Configuration conf) {
+ DataBlockScanner(DataNode datanode, FSDatasetInterface dataset, Configuration conf) {
this.datanode = datanode;
this.dataset = dataset;
this.conf = conf;
@@ -135,7 +135,7 @@ public class DataBlockScanner implements Runnable {
.iterator();
while (bpidIterator.hasNext()) {
String bpid = bpidIterator.next();
- for (FSDataset.FSVolume vol : dataset.volumes.getVolumes()) {
+ for (FSDatasetInterface.FSVolumeInterface vol : dataset.getVolumes()) {
try {
File currFile = BlockPoolSliceScanner.getCurrentFile(vol, bpid);
if (currFile.exists()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 33070e7de3c..b7a91696b64 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -126,7 +126,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.common.Util;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.VolumeInfo;
import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
import org.apache.hadoop.hdfs.server.datanode.web.resources.DatanodeWebHdfsMethods;
@@ -580,11 +579,11 @@ public class DataNode extends Configured
if (conf.getInt(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,
DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT) < 0) {
reason = "verification is turned off by configuration";
- } else if (!(data instanceof FSDataset)) {
- reason = "verifcation is supported only with FSDataset";
+ } else if ("SimulatedFSDataset".equals(data.getClass().getSimpleName())) {
+ reason = "verification is not supported by SimulatedFSDataset";
}
if (reason == null) {
- blockScanner = new DataBlockScanner(this, (FSDataset)data, conf);
+ blockScanner = new DataBlockScanner(this, data, conf);
blockScanner.start();
} else {
LOG.info("Periodic Block Verification scan is disabled because " +
@@ -609,11 +608,11 @@ public class DataNode extends Configured
if (conf.getInt(DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT) < 0) {
reason = "verification is turned off by configuration";
- } else if (!(data instanceof FSDataset)) {
- reason = "verification is supported only with FSDataset";
+ } else if ("SimulatedFSDataset".equals(data.getClass().getSimpleName())) {
+ reason = "verification is not supported by SimulatedFSDataset";
}
if (reason == null) {
- directoryScanner = new DirectoryScanner(this, (FSDataset) data, conf);
+ directoryScanner = new DirectoryScanner(this, data, conf);
directoryScanner.start();
} else {
LOG.info("Periodic Directory Tree Verification scan is disabled because " +
@@ -2235,16 +2234,7 @@ public class DataNode extends Configured
*/
@Override // DataNodeMXBean
public String getVolumeInfo() {
- final Map<String, Object> info = new HashMap<String, Object>();
- Collection<VolumeInfo> volumes = ((FSDataset)this.data).getVolumeInfo();
- for (VolumeInfo v : volumes) {
- final Map<String, Object> innerInfo = new HashMap<String, Object>();
- innerInfo.put("usedSpace", v.usedSpace);
- innerInfo.put("freeSpace", v.freeSpace);
- innerInfo.put("reservedSpace", v.reservedSpace);
- info.put(v.directory, innerInfo);
- }
- return JSON.toString(info);
+ return JSON.toString(data.getVolumeInfoMap());
}
@Override // DataNodeMXBean
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 488c0188c3b..64349d86c40 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -751,7 +751,7 @@ public class DataStorage extends Storage {
Matcher matcher = PRE_GENSTAMP_META_FILE_PATTERN.matcher(oldFileName);
if (matcher.matches()) {
//return the current metadata file name
- return FSDataset.getMetaFileName(matcher.group(1),
+ return DatanodeUtil.getMetaFileName(matcher.group(1),
GenerationStamp.GRANDFATHER_GENERATION_STAMP);
}
return oldFileName;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
index cb0767c93a2..c59929edd6c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
@@ -18,7 +18,9 @@
package org.apache.hadoop.hdfs.server.datanode;
import java.io.File;
+import java.io.FilenameFilter;
import java.io.IOException;
+import java.util.Arrays;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.Block;
@@ -26,6 +28,10 @@ import org.apache.hadoop.hdfs.protocol.Block;
/** Provide utility methods for Datanode. */
@InterfaceAudience.Private
class DatanodeUtil {
+ static final String METADATA_EXTENSION = ".meta";
+
+ static final String UNLINK_BLOCK_SUFFIX = ".unlinked";
+
private final static String DISK_ERROR = "Possible disk error on file creation: ";
/** Get the cause of an I/O exception if caused by a possible disk error
@@ -64,4 +70,37 @@ class DatanodeUtil {
}
return f;
}
+
+ static String getMetaFileName(String blockFileName, long genStamp) {
+ return blockFileName + "_" + genStamp + METADATA_EXTENSION;
+ }
+
+ static File getMetaFile(File f, long genStamp) {
+ return new File(getMetaFileName(f.getAbsolutePath(), genStamp));
+ }
+
+ /** Find the corresponding meta data file from a given block file */
+ static File findMetaFile(final File blockFile) throws IOException {
+ final String prefix = blockFile.getName() + "_";
+ final File parent = blockFile.getParentFile();
+ File[] matches = parent.listFiles(new FilenameFilter() {
+ public boolean accept(File dir, String name) {
+ return dir.equals(parent)
+ && name.startsWith(prefix) && name.endsWith(METADATA_EXTENSION);
+ }
+ });
+
+ if (matches == null || matches.length == 0) {
+ throw new IOException("Meta file not found, blockFile=" + blockFile);
+ }
+ else if (matches.length > 1) {
+ throw new IOException("Found more than one meta files: "
+ + Arrays.asList(matches));
+ }
+ return matches[0];
+ }
+
+ static File getUnlinkTmpFile(File f) {
+ return new File(f.getParentFile(), f.getName()+UNLINK_BLOCK_SUFFIX);
+ }
}
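
The helpers above moved from FSDataset into DatanodeUtil unchanged. A small usage sketch follows (illustrative only, not part of this patch); it has to live in the org.apache.hadoop.hdfs.server.datanode package because the methods are package-private, and the block file path below is made up.

// Illustrative usage sketch of the relocated helpers; not part of this patch.
package org.apache.hadoop.hdfs.server.datanode;

import java.io.File;
import java.io.IOException;

class DatanodeUtilExample {
  public static void main(String[] args) throws IOException {
    final File blockFile = new File("/data/dfs/current/blk_1234"); // hypothetical path
    final long genStamp = 1001L;

    // Name is built by concatenation: blk_1234_1001.meta
    System.out.println(DatanodeUtil.getMetaFile(blockFile, genStamp));

    // Scans the parent directory for blk_1234_*.meta and throws an IOException
    // if it finds none or more than one match.
    System.out.println(DatanodeUtil.findMetaFile(blockFile));

    // blk_1234.unlinked, used while breaking hard links on the block file
    System.out.println(DatanodeUtil.getUnlinkTmpFile(blockFile));
  }
}
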
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index d7c0a930965..40b51a28b2b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -43,20 +43,19 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.util.Daemon;
/**
* Periodically scans the data directories for block and block metadata files.
- * Reconciles the differences with block information maintained in
- * {@link FSDataset}
+ * Reconciles the differences with block information maintained in the dataset.
*/
@InterfaceAudience.Private
public class DirectoryScanner implements Runnable {
private static final Log LOG = LogFactory.getLog(DirectoryScanner.class);
private final DataNode datanode;
- private final FSDataset dataset;
+ private final FSDatasetInterface dataset;
private final ExecutorService reportCompileThreadPool;
private final ScheduledExecutorService masterThread;
private final long scanPeriodMsecs;
@@ -158,13 +157,13 @@ public class DirectoryScanner implements Runnable {
private final long blockId;
private final File metaFile;
private final File blockFile;
- private final FSVolume volume;
+ private final FSVolumeInterface volume;
ScanInfo(long blockId) {
this(blockId, null, null, null);
}
- ScanInfo(long blockId, File blockFile, File metaFile, FSVolume vol) {
+ ScanInfo(long blockId, File blockFile, File metaFile, FSVolumeInterface vol) {
this.blockId = blockId;
this.metaFile = metaFile;
this.blockFile = blockFile;
@@ -183,7 +182,7 @@ public class DirectoryScanner implements Runnable {
return blockId;
}
- FSVolume getVolume() {
+ FSVolumeInterface getVolume() {
return volume;
}
@@ -220,7 +219,7 @@ public class DirectoryScanner implements Runnable {
}
}
- DirectoryScanner(DataNode dn, FSDataset dataset, Configuration conf) {
+ DirectoryScanner(DataNode dn, FSDatasetInterface dataset, Configuration conf) {
this.datanode = dn;
this.dataset = dataset;
int interval = conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
@@ -269,7 +268,7 @@ public class DirectoryScanner implements Runnable {
return;
}
- String[] bpids = dataset.getBPIdlist();
+ String[] bpids = dataset.getBlockPoolList();
for(String bpid : bpids) {
UpgradeManagerDatanode um =
datanode.getUpgradeManagerDatanode(bpid);
@@ -411,17 +410,29 @@ public class DirectoryScanner implements Runnable {
diffRecord.add(new ScanInfo(blockId));
}
+ /** Is the given volume still valid in the dataset? */
+ private static boolean isValid(final FSDatasetInterface dataset,
+ final FSVolumeInterface volume) {
+ for (FSVolumeInterface vol : dataset.getVolumes()) {
+ if (vol == volume) {
+ return true;
+ }
+ }
+ return false;
+ }
+
/** Get lists of blocks on the disk sorted by blockId, per blockpool */
private Map<String, ScanInfo[]> getDiskReport() {
// First get list of data directories
- List<FSVolume> volumes = dataset.volumes.getVolumes();
+ final List<FSVolumeInterface> volumes = dataset.getVolumes();
ArrayList<ScanInfoPerBlockPool> dirReports =
new ArrayList<ScanInfoPerBlockPool>(volumes.size());
Map<Integer, Future<ScanInfoPerBlockPool>> compilersInProgress =
new HashMap<Integer, Future<ScanInfoPerBlockPool>>();
for (int i = 0; i < volumes.size(); i++) {
- if (!dataset.volumes.isValid(volumes.get(i))) { // volume is still valid
+ if (!isValid(dataset, volumes.get(i))) {
+ // volume is invalid
dirReports.add(i, null);
} else {
ReportCompiler reportCompiler =
@@ -446,7 +457,8 @@ public class DirectoryScanner implements Runnable {
// Compile consolidated report for all the volumes
ScanInfoPerBlockPool list = new ScanInfoPerBlockPool();
for (int i = 0; i < volumes.size(); i++) {
- if (dataset.volumes.isValid(volumes.get(i))) { // volume is still valid
+ if (isValid(dataset, volumes.get(i))) {
+ // volume is still valid
list.addAll(dirReports.get(i));
}
}
@@ -461,9 +473,9 @@ public class DirectoryScanner implements Runnable {
private static class ReportCompiler
implements Callable<ScanInfoPerBlockPool> {
- private FSVolume volume;
+ private FSVolumeInterface volume;
- public ReportCompiler(FSVolume volume) {
+ public ReportCompiler(FSVolumeInterface volume) {
this.volume = volume;
}
@@ -473,14 +485,14 @@ public class DirectoryScanner implements Runnable {
ScanInfoPerBlockPool result = new ScanInfoPerBlockPool(bpList.length);
for (String bpid : bpList) {
LinkedList<ScanInfo> report = new LinkedList<ScanInfo>();
- File bpFinalizedDir = volume.getBlockPoolSlice(bpid).getFinalizedDir();
+ File bpFinalizedDir = volume.getFinalizedDir(bpid);
result.put(bpid, compileReport(volume, bpFinalizedDir, report));
}
return result;
}
/** Compile list {@link ScanInfo} for the blocks in the directory */
- private LinkedList<ScanInfo> compileReport(FSVolume vol, File dir,
+ private LinkedList<ScanInfo> compileReport(FSVolumeInterface vol, File dir,
LinkedList<ScanInfo> report) {
File[] files;
try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
index 343f13703fd..692c0f6b3a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
@@ -23,7 +23,6 @@ import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
-import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InputStream;
import java.io.RandomAccessFile;
@@ -81,14 +80,13 @@ class FSDataset implements FSDatasetInterface {
* A node type that can be built into a tree reflecting the
* hierarchy of blocks on the local disk.
*/
- class FSDir {
- File dir;
+ private class FSDir {
+ final File dir;
int numBlocks = 0;
FSDir children[];
int lastChildIdx = 0;
- /**
- */
- public FSDir(File dir)
+
+ private FSDir(File dir)
throws IOException {
this.dir = dir;
this.children = null;
@@ -113,7 +111,7 @@ class FSDataset implements FSDatasetInterface {
}
}
- public File addBlock(Block b, File src) throws IOException {
+ private File addBlock(Block b, File src) throws IOException {
//First try without creating subdirectories
File file = addBlock(b, src, false, false);
return (file != null) ? file : addBlock(b, src, true, true);
@@ -161,7 +159,7 @@ class FSDataset implements FSDatasetInterface {
return children[ lastChildIdx ].addBlock(b, src, true, false);
}
- void getVolumeMap(String bpid, ReplicasMap volumeMap, FSVolume volume)
+ private void getVolumeMap(String bpid, ReplicasMap volumeMap, FSVolume volume)
throws IOException {
if (children != null) {
for (int i = 0; i < children.length; i++) {
@@ -207,7 +205,7 @@ class FSDataset implements FSDatasetInterface {
* check if a data directory is healthy
* @throws DiskErrorException
*/
- public void checkDirTree() throws DiskErrorException {
+ private void checkDirTree() throws DiskErrorException {
DiskChecker.checkDir(dir);
if (children != null) {
@@ -217,7 +215,7 @@ class FSDataset implements FSDatasetInterface {
}
}
- void clearPath(File f) {
+ private void clearPath(File f) {
String root = dir.getAbsolutePath();
String dir = f.getAbsolutePath();
if (dir.startsWith(root)) {
@@ -270,7 +268,8 @@ class FSDataset implements FSDatasetInterface {
}
return false;
}
-
+
+ @Override
public String toString() {
return "FSDir{" +
"dir=" + dir +
@@ -284,7 +283,7 @@ class FSDataset implements FSDatasetInterface {
* Taken together, all BlockPoolSlices sharing a block pool ID across a
* cluster represent a single block pool.
*/
- class BlockPoolSlice {
+ private class BlockPoolSlice {
private final String bpid;
private final FSVolume volume; // volume to which this BlockPool belongs to
private final File currentDir; // StorageDirectory/current/bpid/current
@@ -342,11 +341,7 @@ class FSDataset implements FSDatasetInterface {
File getDirectory() {
return currentDir.getParentFile();
}
-
- File getCurrentDir() {
- return currentDir;
- }
-
+
File getFinalizedDir() {
return finalizedDir.dir;
}
@@ -387,7 +382,7 @@ class FSDataset implements FSDatasetInterface {
File addBlock(Block b, File f) throws IOException {
File blockFile = finalizedDir.addBlock(b, f);
- File metaFile = getMetaFile(blockFile , b.getGenerationStamp());
+ File metaFile = DatanodeUtil.getMetaFile(blockFile, b.getGenerationStamp());
dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length());
return blockFile;
}
@@ -455,7 +450,7 @@ class FSDataset implements FSDatasetInterface {
DataInputStream checksumIn = null;
InputStream blockIn = null;
try {
- File metaFile = new File(getMetaFileName(blockFile.toString(), genStamp));
+ final File metaFile = DatanodeUtil.getMetaFile(blockFile, genStamp);
long blockFileLen = blockFile.length();
long metaFileLen = metaFile.length();
int crcHeaderLen = DataChecksum.getChecksumHeaderSize();
@@ -521,7 +516,7 @@ class FSDataset implements FSDatasetInterface {
}
}
- class FSVolume {
+ class FSVolume implements FSVolumeInterface {
private final Map<String, BlockPoolSlice> map = new HashMap<String, BlockPoolSlice>();
private final File currentDir; // /current
private final DF usage;
@@ -534,11 +529,6 @@ class FSDataset implements FSDatasetInterface {
File parent = currentDir.getParentFile();
this.usage = new DF(parent, conf);
}
-
- /** Return storage directory corresponding to the volume */
- File getDir() {
- return currentDir.getParentFile();
- }
File getCurrentDir() {
return currentDir;
@@ -583,8 +573,9 @@ class FSDataset implements FSDatasetInterface {
long remaining = usage.getCapacity() - reserved;
return remaining > 0 ? remaining : 0;
}
-
- long getAvailable() throws IOException {
+
+ @Override
+ public long getAvailable() throws IOException {
long remaining = getCapacity()-getDfsUsed();
long available = usage.getAvailable();
if (remaining>available) {
@@ -600,19 +591,30 @@ class FSDataset implements FSDatasetInterface {
String getMount() throws IOException {
return usage.getMount();
}
-
- BlockPoolSlice getBlockPoolSlice(String bpid) throws IOException {
+
+ private BlockPoolSlice getBlockPoolSlice(String bpid) throws IOException {
BlockPoolSlice bp = map.get(bpid);
if (bp == null) {
throw new IOException("block pool " + bpid + " is not found");
}
return bp;
}
-
+
+ @Override
+ public File getDirectory(String bpid) throws IOException {
+ return getBlockPoolSlice(bpid).getDirectory();
+ }
+
+ @Override
+ public File getFinalizedDir(String bpid) throws IOException {
+ return getBlockPoolSlice(bpid).getFinalizedDir();
+ }
+
/**
* Make a deep copy of the list of currently active BPIDs
*/
- String[] getBlockPoolList() {
+ @Override
+ public String[] getBlockPoolList() {
synchronized(FSDataset.this) {
return map.keySet().toArray(new String[map.keySet().size()]);
}
@@ -681,7 +683,8 @@ class FSDataset implements FSDatasetInterface {
BlockPoolSlice bp = getBlockPoolSlice(bpid);
bp.clearPath(f);
}
-
+
+ @Override
public String toString() {
return currentDir.getAbsolutePath();
}
@@ -773,21 +776,18 @@ class FSDataset implements FSDatasetInterface {
* Read access to this unmodifiable list is not synchronized.
* This list is replaced on modification holding "this" lock.
*/
- private volatile List<FSVolume> volumes = null;
+ private volatile List<FSVolumeInterface> volumes = null;
+
BlockVolumeChoosingPolicy blockChooser;
int numFailedVolumes;
- FSVolumeSet(FSVolume[] volumes, int failedVols, BlockVolumeChoosingPolicy blockChooser) {
- List<FSVolume> list = Arrays.asList(volumes);
- this.volumes = Collections.unmodifiableList(list);
+ FSVolumeSet(List<FSVolumeInterface> volumes, int failedVols,
+ BlockVolumeChoosingPolicy blockChooser) {
+ this.volumes = Collections.unmodifiableList(volumes);
this.blockChooser = blockChooser;
this.numFailedVolumes = failedVols;
}
- private int numberOfVolumes() {
- return volumes.size();
- }
-
private int numberOfFailedVolumes() {
return numFailedVolumes;
}
@@ -800,36 +800,36 @@ class FSDataset implements FSDatasetInterface {
* @return next volume to store the block in.
*/
synchronized FSVolume getNextVolume(long blockSize) throws IOException {
- return blockChooser.chooseVolume(volumes, blockSize);
+ return (FSVolume)blockChooser.chooseVolume(volumes, blockSize);
}
private long getDfsUsed() throws IOException {
long dfsUsed = 0L;
- for (FSVolume vol : volumes) {
- dfsUsed += vol.getDfsUsed();
+ for (FSVolumeInterface v : volumes) {
+ dfsUsed += ((FSVolume)v).getDfsUsed();
}
return dfsUsed;
}
private long getBlockPoolUsed(String bpid) throws IOException {
long dfsUsed = 0L;
- for (FSVolume vol : volumes) {
- dfsUsed += vol.getBlockPoolUsed(bpid);
+ for (FSVolumeInterface v : volumes) {
+ dfsUsed += ((FSVolume)v).getBlockPoolUsed(bpid);
}
return dfsUsed;
}
private long getCapacity() {
long capacity = 0L;
- for (FSVolume vol : volumes) {
- capacity += vol.getCapacity();
+ for (FSVolumeInterface v : volumes) {
+ capacity += ((FSVolume)v).getCapacity();
}
return capacity;
}
private long getRemaining() throws IOException {
long remaining = 0L;
- for (FSVolume vol : volumes) {
+ for (FSVolumeInterface vol : volumes) {
remaining += vol.getAvailable();
}
return remaining;
@@ -837,15 +837,15 @@ class FSDataset implements FSDatasetInterface {
private void getVolumeMap(ReplicasMap volumeMap)
throws IOException {
- for (FSVolume vol : volumes) {
- vol.getVolumeMap(volumeMap);
+ for (FSVolumeInterface v : volumes) {
+ ((FSVolume)v).getVolumeMap(volumeMap);
}
}
private void getVolumeMap(String bpid, ReplicasMap volumeMap)
throws IOException {
- for (FSVolume vol : volumes) {
- vol.getVolumeMap(bpid, volumeMap);
+ for (FSVolumeInterface v : volumes) {
+ ((FSVolume)v).getVolumeMap(bpid, volumeMap);
}
}
@@ -861,10 +861,10 @@ class FSDataset implements FSDatasetInterface {
ArrayList<FSVolume> removedVols = null;
// Make a copy of volumes for performing modification
- List<FSVolume> volumeList = new ArrayList<FSVolume>(getVolumes());
+ final List<FSVolumeInterface> volumeList = new ArrayList<FSVolumeInterface>(volumes);
for (int idx = 0; idx < volumeList.size(); idx++) {
- FSVolume fsv = volumeList.get(idx);
+ FSVolume fsv = (FSVolume)volumeList.get(idx);
try {
fsv.checkDirs();
} catch (DiskErrorException e) {
@@ -881,8 +881,8 @@ class FSDataset implements FSDatasetInterface {
// Remove null volumes from the volumes array
if (removedVols != null && removedVols.size() > 0) {
- List<FSVolume> newVols = new ArrayList<FSVolume>();
- for (FSVolume vol : volumeList) {
+ List<FSVolumeInterface> newVols = new ArrayList<FSVolumeInterface>();
+ for (FSVolumeInterface vol : volumeList) {
if (vol != null) {
newVols.add(vol);
}
@@ -895,44 +895,30 @@ class FSDataset implements FSDatasetInterface {
return removedVols;
}
-
+
+ @Override
public String toString() {
return volumes.toString();
}
- boolean isValid(FSVolume volume) {
- for (FSVolume vol : volumes) {
- if (vol == volume) {
- return true;
- }
- }
- return false;
- }
private void addBlockPool(String bpid, Configuration conf)
throws IOException {
- for (FSVolume v : volumes) {
- v.addBlockPool(bpid, conf);
+ for (FSVolumeInterface v : volumes) {
+ ((FSVolume)v).addBlockPool(bpid, conf);
}
}
private void removeBlockPool(String bpid) {
- for (FSVolume v : volumes) {
- v.shutdownBlockPool(bpid);
+ for (FSVolumeInterface v : volumes) {
+ ((FSVolume)v).shutdownBlockPool(bpid);
}
}
-
- /**
- * @return unmodifiable list of volumes
- */
- public List<FSVolume> getVolumes() {
- return volumes;
- }
private void shutdown() {
- for (FSVolume volume : volumes) {
+ for (FSVolumeInterface volume : volumes) {
if(volume != null) {
- volume.shutdown();
+ ((FSVolume)volume).shutdown();
}
}
}
@@ -944,35 +930,20 @@ class FSDataset implements FSDatasetInterface {
//
//////////////////////////////////////////////////////
- //Find better place?
- static final String METADATA_EXTENSION = ".meta";
- static final String UNLINK_BLOCK_SUFFIX = ".unlinked";
-
private static boolean isUnlinkTmpFile(File f) {
String name = f.getName();
- return name.endsWith(UNLINK_BLOCK_SUFFIX);
- }
-
- static File getUnlinkTmpFile(File f) {
- return new File(f.getParentFile(), f.getName()+UNLINK_BLOCK_SUFFIX);
+ return name.endsWith(DatanodeUtil.UNLINK_BLOCK_SUFFIX);
}
private static File getOrigFile(File unlinkTmpFile) {
String fileName = unlinkTmpFile.getName();
return new File(unlinkTmpFile.getParentFile(),
- fileName.substring(0, fileName.length()-UNLINK_BLOCK_SUFFIX.length()));
- }
-
- static String getMetaFileName(String blockFileName, long genStamp) {
- return blockFileName + "_" + genStamp + METADATA_EXTENSION;
- }
-
- static File getMetaFile(File f , long genStamp) {
- return new File(getMetaFileName(f.getAbsolutePath(), genStamp));
+ fileName.substring(0,
+ fileName.length() - DatanodeUtil.UNLINK_BLOCK_SUFFIX.length()));
}
protected File getMetaFile(ExtendedBlock b) throws IOException {
- return getMetaFile(getBlockFile(b), b.getGenerationStamp());
+ return DatanodeUtil.getMetaFile(getBlockFile(b), b.getGenerationStamp());
}
/** Find the metadata file for the specified block file.
@@ -994,34 +965,13 @@ class FSDataset implements FSDatasetInterface {
" does not have a metafile!");
return GenerationStamp.GRANDFATHER_GENERATION_STAMP;
}
-
- /** Find the corresponding meta data file from a given block file */
- private static File findMetaFile(final File blockFile) throws IOException {
- final String prefix = blockFile.getName() + "_";
- final File parent = blockFile.getParentFile();
- File[] matches = parent.listFiles(new FilenameFilter() {
- public boolean accept(File dir, String name) {
- return dir.equals(parent)
- && name.startsWith(prefix) && name.endsWith(METADATA_EXTENSION);
- }
- });
-
- if (matches == null || matches.length == 0) {
- throw new IOException("Meta file not found, blockFile=" + blockFile);
- }
- else if (matches.length > 1) {
- throw new IOException("Found more than one meta files: "
- + Arrays.asList(matches));
- }
- return matches[0];
- }
/** Find the corresponding meta data file from a given block file */
private static long parseGenerationStamp(File blockFile, File metaFile
) throws IOException {
String metaname = metaFile.getName();
String gs = metaname.substring(blockFile.getName().length() + 1,
- metaname.length() - METADATA_EXTENSION.length());
+ metaname.length() - DatanodeUtil.METADATA_EXTENSION.length());
try {
return Long.parseLong(gs);
} catch(NumberFormatException nfe) {
@@ -1030,6 +980,11 @@ class FSDataset implements FSDatasetInterface {
}
}
+ @Override // FSDatasetInterface
+ public List<FSVolumeInterface> getVolumes() {
+ return volumes.volumes;
+ }
+
@Override // FSDatasetInterface
public synchronized Block getStoredBlock(String bpid, long blkid)
throws IOException {
@@ -1037,7 +992,7 @@ class FSDataset implements FSDatasetInterface {
if (blockfile == null) {
return null;
}
- File metafile = findMetaFile(blockfile);
+ final File metafile = DatanodeUtil.findMetaFile(blockfile);
return new Block(blkid, blockfile.length(),
parseGenerationStamp(blockfile, metafile));
}
@@ -1101,7 +1056,7 @@ class FSDataset implements FSDatasetInterface {
/**
* An FSDataset has a directory where it loads its data files.
*/
- public FSDataset(DataNode datanode, DataStorage storage, Configuration conf)
+ FSDataset(DataNode datanode, DataStorage storage, Configuration conf)
throws IOException {
this.datanode = datanode;
this.maxBlocksPerDir =
@@ -1134,12 +1089,12 @@ class FSDataset implements FSDatasetInterface {
+ ", volume failures tolerated: " + volFailuresTolerated);
}
- FSVolume[] volArray = new FSVolume[storage.getNumStorageDirs()];
+ final List<FSVolumeInterface> volArray = new ArrayList<FSVolumeInterface>(
+ storage.getNumStorageDirs());
for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
- volArray[idx] = new FSVolume(storage.getStorageDir(idx).getCurrentDir(),
- conf);
- DataNode.LOG.info("FSDataset added volume - "
- + storage.getStorageDir(idx).getCurrentDir());
+ final File dir = storage.getStorageDir(idx).getCurrentDir();
+ volArray.add(new FSVolume(dir, conf));
+ DataNode.LOG.info("FSDataset added volume - " + dir);
}
volumeMap = new ReplicasMap(this);
@@ -1185,7 +1140,7 @@ class FSDataset implements FSDatasetInterface {
*/
@Override // FSDatasetInterface
public boolean hasEnoughResource() {
- return volumes.numberOfVolumes() >= validVolsRequired;
+ return getVolumes().size() >= validVolsRequired;
}
/**
@@ -1368,8 +1323,8 @@ class FSDataset implements FSDatasetInterface {
private static File moveBlockFiles(Block b, File srcfile, File destdir
) throws IOException {
final File dstfile = new File(destdir, b.getBlockName());
- final File srcmeta = getMetaFile(srcfile, b.getGenerationStamp());
- final File dstmeta = getMetaFile(dstfile, b.getGenerationStamp());
+ final File srcmeta = DatanodeUtil.getMetaFile(srcfile, b.getGenerationStamp());
+ final File dstmeta = DatanodeUtil.getMetaFile(dstfile, b.getGenerationStamp());
if (!srcmeta.renameTo(dstmeta)) {
throw new IOException("Failed to move meta file for " + b
+ " from " + srcmeta + " to " + dstmeta);
@@ -1487,7 +1442,7 @@ class FSDataset implements FSDatasetInterface {
// construct a RBW replica with the new GS
File blkfile = replicaInfo.getBlockFile();
- FSVolume v = replicaInfo.getVolume();
+ FSVolume v = (FSVolume)replicaInfo.getVolume();
if (v.getAvailable() < estimateBlockLen - replicaInfo.getNumBytes()) {
throw new DiskOutOfSpaceException("Insufficient space for appending to "
+ replicaInfo);
@@ -1744,7 +1699,7 @@ class FSDataset implements FSDatasetInterface {
+ visible + ", temp=" + temp);
}
// check volume
- final FSVolume v = temp.getVolume();
+ final FSVolume v = (FSVolume)temp.getVolume();
if (v == null) {
throw new IOException("r.getVolume() = null, temp=" + temp);
}
@@ -1805,7 +1760,7 @@ class FSDataset implements FSDatasetInterface {
if ( vol == null ) {
ReplicaInfo replica = volumeMap.get(bpid, blk);
if (replica != null) {
- vol = volumeMap.get(bpid, blk).getVolume();
+ vol = (FSVolume)volumeMap.get(bpid, blk).getVolume();
}
if ( vol == null ) {
throw new IOException("Could not find volume for block " + blk);
@@ -1845,7 +1800,7 @@ class FSDataset implements FSDatasetInterface {
newReplicaInfo = (FinalizedReplica)
((ReplicaUnderRecovery)replicaInfo).getOriginalReplica();
} else {
- FSVolume v = replicaInfo.getVolume();
+ FSVolume v = (FSVolume)replicaInfo.getVolume();
File f = replicaInfo.getBlockFile();
if (v == null) {
throw new IOException("No volume for temporary file " + f +
@@ -1943,7 +1898,8 @@ class FSDataset implements FSDatasetInterface {
/**
* Get the list of finalized blocks from in-memory blockmap for a block pool.
*/
- synchronized List<Block> getFinalizedBlocks(String bpid) {
+ @Override
+ public synchronized List<Block> getFinalizedBlocks(String bpid) {
ArrayList<Block> finalized = new ArrayList<Block>(volumeMap.size(bpid));
for (ReplicaInfo b : volumeMap.replicas(bpid)) {
if(b.getState() == ReplicaState.FINALIZED) {
@@ -2016,7 +1972,7 @@ class FSDataset implements FSDatasetInterface {
}
//check replica's meta file
- final File metafile = getMetaFile(f, r.getGenerationStamp());
+ final File metafile = DatanodeUtil.getMetaFile(f, r.getGenerationStamp());
if (!metafile.exists()) {
throw new IOException("Metafile " + metafile + " does not exist, r=" + r);
}
@@ -2047,7 +2003,7 @@ class FSDataset implements FSDatasetInterface {
error = true;
continue;
}
- v = dinfo.getVolume();
+ v = (FSVolume)dinfo.getVolume();
if (f == null) {
DataNode.LOG.warn("Unexpected error trying to delete block "
+ invalidBlks[i] +
@@ -2081,7 +2037,7 @@ class FSDataset implements FSDatasetInterface {
}
volumeMap.remove(bpid, invalidBlks[i]);
}
- File metaFile = getMetaFile(f, invalidBlks[i].getGenerationStamp());
+ File metaFile = DatanodeUtil.getMetaFile(f, invalidBlks[i].getGenerationStamp());
// Delete the block asynchronously to make sure we can do it fast enough
asyncDiskService.deleteAsync(v, f, metaFile,
@@ -2238,8 +2194,9 @@ class FSDataset implements FSDatasetInterface {
* @param diskMetaFile Metadata file from on the disk
* @param vol Volume of the block file
*/
+ @Override
public void checkAndUpdate(String bpid, long blockId, File diskFile,
- File diskMetaFile, FSVolume vol) {
+ File diskMetaFile, FSVolumeInterface vol) {
Block corruptBlock = null;
ReplicaInfo memBlockInfo;
synchronized (this) {
@@ -2327,7 +2284,7 @@ class FSDataset implements FSDatasetInterface {
// Compare generation stamp
if (memBlockInfo.getGenerationStamp() != diskGS) {
- File memMetaFile = getMetaFile(diskFile,
+ File memMetaFile = DatanodeUtil.getMetaFile(diskFile,
memBlockInfo.getGenerationStamp());
if (memMetaFile.exists()) {
if (memMetaFile.compareTo(diskMetaFile) != 0) {
@@ -2562,18 +2519,15 @@ class FSDataset implements FSDatasetInterface {
volumes.removeBlockPool(bpid);
}
- /**
- * get list of all bpids
- * @return list of bpids
- */
- public String [] getBPIdlist() {
+ @Override
+ public String[] getBlockPoolList() {
return volumeMap.getBlockPoolList();
}
/**
* Class for representing the Datanode volume information
*/
- static class VolumeInfo {
+ private static class VolumeInfo {
final String directory;
final long usedSpace;
final long freeSpace;
@@ -2586,10 +2540,11 @@ class FSDataset implements FSDatasetInterface {
this.reservedSpace = reservedSpace;
}
}
-
- Collection<VolumeInfo> getVolumeInfo() {
+
+ private Collection<VolumeInfo> getVolumeInfo() {
Collection<VolumeInfo> info = new ArrayList<VolumeInfo>();
- for (FSVolume volume : volumes.volumes) {
+ for (FSVolumeInterface v : volumes.volumes) {
+ final FSVolume volume = (FSVolume)v;
long used = 0;
long free = 0;
try {
@@ -2606,13 +2561,27 @@ class FSDataset implements FSDatasetInterface {
}
return info;
}
-
+
+ @Override
+ public Map<String, Object> getVolumeInfoMap() {
+ final Map<String, Object> info = new HashMap<String, Object>();
+ Collection<VolumeInfo> volumes = getVolumeInfo();
+ for (VolumeInfo v : volumes) {
+ final Map<String, Object> innerInfo = new HashMap<String, Object>();
+ innerInfo.put("usedSpace", v.usedSpace);
+ innerInfo.put("freeSpace", v.freeSpace);
+ innerInfo.put("reservedSpace", v.reservedSpace);
+ info.put(v.directory, innerInfo);
+ }
+ return info;
+ }
+
@Override //FSDatasetInterface
public synchronized void deleteBlockPool(String bpid, boolean force)
throws IOException {
if (!force) {
- for (FSVolume volume : volumes.volumes) {
- if (!volume.isBPDirEmpty(bpid)) {
+ for (FSVolumeInterface volume : volumes.volumes) {
+ if (!((FSVolume)volume).isBPDirEmpty(bpid)) {
DataNode.LOG.warn(bpid
+ " has some block files, cannot delete unless forced");
throw new IOException("Cannot delete block pool, "
@@ -2620,8 +2589,8 @@ class FSDataset implements FSDatasetInterface {
}
}
}
- for (FSVolume volume : volumes.volumes) {
- volume.deleteBPDirectories(bpid, force);
+ for (FSVolumeInterface volume : volumes.volumes) {
+ ((FSVolume)volume).deleteBPDirectories(bpid, force);
}
}
@@ -2629,7 +2598,7 @@ class FSDataset implements FSDatasetInterface {
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block)
throws IOException {
File datafile = getBlockFile(block);
- File metafile = getMetaFile(datafile, block.getGenerationStamp());
+ File metafile = DatanodeUtil.getMetaFile(datafile, block.getGenerationStamp());
BlockLocalPathInfo info = new BlockLocalPathInfo(block,
datafile.getAbsolutePath(), metafile.getAbsolutePath());
return info;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java
index 627ac27f95b..5006c4ad5be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java
@@ -19,10 +19,13 @@ package org.apache.hadoop.hdfs.server.datanode;
import java.io.Closeable;
+import java.io.File;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
+import java.util.List;
+import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
@@ -46,8 +49,44 @@ import org.apache.hadoop.util.DiskChecker.DiskErrorException;
*/
@InterfaceAudience.Private
public interface FSDatasetInterface extends FSDatasetMBean {
-
-
+ /**
+ * This is an interface for the underlying volume.
+ * @see org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume
+ */
+ interface FSVolumeInterface {
+ /** @return a list of block pools. */
+ public String[] getBlockPoolList();
+
+ /** @return the available storage space in bytes. */
+ public long getAvailable() throws IOException;
+
+ /** @return the directory for the block pool. */
+ public File getDirectory(String bpid) throws IOException;
+
+ /** @return the directory for the finalized blocks in the block pool. */
+ public File getFinalizedDir(String bpid) throws IOException;
+ }
+
+ /** @return a list of volumes. */
+ public List<FSVolumeInterface> getVolumes();
+
+ /** @return a volume information map (name => info). */
+ public Map<String, Object> getVolumeInfoMap();
+
+ /** @return a list of block pools. */
+ public String[] getBlockPoolList();
+
+ /** @return a list of finalized blocks for the given block pool. */
+ public List<Block> getFinalizedBlocks(String bpid);
+
+ /**
+ * Check whether the in-memory block record matches the block on the disk;
+ * if they do not match, update the record or mark it as corrupted.
+ */
+ public void checkAndUpdate(String bpid, long blockId, File diskFile,
+ File diskMetaFile, FSVolumeInterface vol);
+
/**
* Returns the length of the metadata file of the specified block
* @param b - the block for which the metadata length is desired
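
With getVolumes(), getFinalizedDir(..) and getAvailable() now on the interface, scanner-style code can be written purely against FSDatasetInterface, which is what the BlockPoolSliceScanner and DirectoryScanner changes above rely on. A small sketch follows (illustrative only, not part of this patch; the class and method names are made up).

// Illustrative sketch against the new interface methods; not part of this patch.
package org.apache.hadoop.hdfs.server.datanode;

import java.io.File;
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;

class VolumeReportExample {
  /** Print the finalized directory and free space of every volume in a block pool. */
  static void report(FSDatasetInterface dataset, String bpid) throws IOException {
    final List<FSVolumeInterface> volumes = dataset.getVolumes();
    for (FSVolumeInterface v : volumes) {
      final File finalizedDir = v.getFinalizedDir(bpid);
      System.out.println(finalizedDir + " : " + v.getAvailable() + " bytes available");
    }
  }
}
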
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java
index d0fc32c7693..6aa10db2f26 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java
@@ -21,7 +21,7 @@ import java.io.File;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
/**
* This class describes a replica that has been finalized.
@@ -38,7 +38,7 @@ class FinalizedReplica extends ReplicaInfo {
* @param dir directory path where block and meta files are located
*/
FinalizedReplica(long blockId, long len, long genStamp,
- FSVolume vol, File dir) {
+ FSVolumeInterface vol, File dir) {
super(blockId, len, genStamp, vol, dir);
}
@@ -48,7 +48,7 @@ class FinalizedReplica extends ReplicaInfo {
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
*/
- FinalizedReplica(Block block, FSVolume vol, File dir) {
+ FinalizedReplica(Block block, FSVolumeInterface vol, File dir) {
super(block, vol, dir);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
index d2a6f46c2ed..f6458508cb7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
@@ -21,7 +21,7 @@ import java.io.File;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
/** This class represents replicas being written.
* Those are the replicas that
@@ -36,7 +36,7 @@ class ReplicaBeingWritten extends ReplicaInPipeline {
* @param dir directory path where block and meta files are located
*/
ReplicaBeingWritten(long blockId, long genStamp,
- FSVolume vol, File dir) {
+ FSVolumeInterface vol, File dir) {
super( blockId, genStamp, vol, dir);
}
@@ -48,7 +48,7 @@ class ReplicaBeingWritten extends ReplicaInPipeline {
* @param writer a thread that is writing to this replica
*/
ReplicaBeingWritten(Block block,
- FSVolume vol, File dir, Thread writer) {
+ FSVolumeInterface vol, File dir, Thread writer) {
super( block, vol, dir, writer);
}
@@ -62,7 +62,7 @@ class ReplicaBeingWritten extends ReplicaInPipeline {
* @param writer a thread that is writing to this replica
*/
ReplicaBeingWritten(long blockId, long len, long genStamp,
- FSVolume vol, File dir, Thread writer ) {
+ FSVolumeInterface vol, File dir, Thread writer ) {
super( blockId, len, genStamp, vol, dir, writer);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
index c20b0090d2d..82851c9f47a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
@@ -24,8 +24,8 @@ import java.io.RandomAccessFile;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.DataChecksum;
@@ -53,7 +53,7 @@ class ReplicaInPipeline extends ReplicaInfo
* @param state replica state
*/
ReplicaInPipeline(long blockId, long genStamp,
- FSVolume vol, File dir) {
+ FSVolumeInterface vol, File dir) {
this( blockId, 0L, genStamp, vol, dir, Thread.currentThread());
}
@@ -65,7 +65,7 @@ class ReplicaInPipeline extends ReplicaInfo
* @param writer a thread that is writing to this replica
*/
ReplicaInPipeline(Block block,
- FSVolume vol, File dir, Thread writer) {
+ FSVolumeInterface vol, File dir, Thread writer) {
this( block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(),
vol, dir, writer);
}
@@ -80,7 +80,7 @@ class ReplicaInPipeline extends ReplicaInfo
* @param writer a thread that is writing to this replica
*/
ReplicaInPipeline(long blockId, long len, long genStamp,
- FSVolume vol, File dir, Thread writer ) {
+ FSVolumeInterface vol, File dir, Thread writer ) {
super( blockId, len, genStamp, vol, dir);
this.bytesAcked = len;
this.bytesOnDisk = len;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
index 3adc0ccc8a6..65da8c7698e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.HardLink;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.io.IOUtils;
/**
@@ -35,8 +35,10 @@ import org.apache.hadoop.io.IOUtils;
*/
@InterfaceAudience.Private
abstract public class ReplicaInfo extends Block implements Replica {
- private FSVolume volume; // volume where the replica belongs
- private File dir; // directory where block & meta files belong
+ /** volume where the replica belongs */
+ private FSVolumeInterface volume;
+ /** directory where block & meta files belong */
+ private File dir;
/**
* Constructor for a zero length replica
@@ -45,7 +47,7 @@ abstract public class ReplicaInfo extends Block implements Replica {
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
*/
- ReplicaInfo(long blockId, long genStamp, FSVolume vol, File dir) {
+ ReplicaInfo(long blockId, long genStamp, FSVolumeInterface vol, File dir) {
this( blockId, 0L, genStamp, vol, dir);
}
@@ -55,7 +57,7 @@ abstract public class ReplicaInfo extends Block implements Replica {
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
*/
- ReplicaInfo(Block block, FSVolume vol, File dir) {
+ ReplicaInfo(Block block, FSVolumeInterface vol, File dir) {
this(block.getBlockId(), block.getNumBytes(),
block.getGenerationStamp(), vol, dir);
}
@@ -69,7 +71,7 @@ abstract public class ReplicaInfo extends Block implements Replica {
* @param dir directory path where block and meta files are located
*/
ReplicaInfo(long blockId, long len, long genStamp,
- FSVolume vol, File dir) {
+ FSVolumeInterface vol, File dir) {
super(blockId, len, genStamp);
this.volume = vol;
this.dir = dir;
@@ -111,14 +113,14 @@ abstract public class ReplicaInfo extends Block implements Replica {
* Get the volume where this replica is located on disk
* @return the volume where this replica is located on disk
*/
- FSVolume getVolume() {
+ FSVolumeInterface getVolume() {
return volume;
}
/**
* Set the volume where this replica is located on disk
*/
- void setVolume(FSVolume vol) {
+ void setVolume(FSVolumeInterface vol) {
this.volume = vol;
}
@@ -162,7 +164,7 @@ abstract public class ReplicaInfo extends Block implements Replica {
* be recovered (especially on Windows) on datanode restart.
*/
private void unlinkFile(File file, Block b) throws IOException {
- File tmpFile = DatanodeUtil.createTmpFile(b, FSDataset.getUnlinkTmpFile(file));
+ File tmpFile = DatanodeUtil.createTmpFile(b, DatanodeUtil.getUnlinkTmpFile(file));
try {
FileInputStream in = new FileInputStream(file);
try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
index 972353962c3..635bf831b34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode;
import java.io.File;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
/**
@@ -145,7 +145,7 @@ class ReplicaUnderRecovery extends ReplicaInfo {
}
@Override //ReplicaInfo
- void setVolume(FSVolume vol) {
+ void setVolume(FSVolumeInterface vol) {
super.setVolume(vol);
original.setVolume(vol);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java
index 91045b7ea55..d37a06cdfd2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java
@@ -21,7 +21,7 @@ import java.io.File;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
/**
* This class represents a replica that is waiting to be recovered.
@@ -44,7 +44,7 @@ class ReplicaWaitingToBeRecovered extends ReplicaInfo {
* @param dir directory path where block and meta files are located
*/
ReplicaWaitingToBeRecovered(long blockId, long len, long genStamp,
- FSVolume vol, File dir) {
+ FSVolumeInterface vol, File dir) {
super(blockId, len, genStamp, vol, dir);
}
@@ -54,7 +54,7 @@ class ReplicaWaitingToBeRecovered extends ReplicaInfo {
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
*/
- ReplicaWaitingToBeRecovered(Block block, FSVolume vol, File dir) {
+ ReplicaWaitingToBeRecovered(Block block, FSVolumeInterface vol, File dir) {
super(block, vol, dir);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java
index d6168c00cdd..1463287268f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode;
import java.io.IOException;
import java.util.List;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
public class RoundRobinVolumesPolicy implements BlockVolumeChoosingPolicy {
@@ -28,8 +28,8 @@ public class RoundRobinVolumesPolicy implements BlockVolumeChoosingPolicy {
private int curVolume = 0;
@Override
- public synchronized FSVolume chooseVolume(List<FSVolume> volumes, long blockSize)
- throws IOException {
+ public synchronized FSVolumeInterface chooseVolume(
+ List<FSVolumeInterface> volumes, long blockSize) throws IOException {
if(volumes.size() < 1) {
throw new DiskOutOfSpaceException("No more available volumes");
}
@@ -44,7 +44,7 @@ public class RoundRobinVolumesPolicy implements BlockVolumeChoosingPolicy {
long maxAvailable = 0;
while (true) {
- FSVolume volume = volumes.get(curVolume);
+ FSVolumeInterface volume = volumes.get(curVolume);
curVolume = (curVolume + 1) % volumes.size();
long availableVolumeSize = volume.getAvailable();
if (availableVolumeSize > blockSize) { return volume; }
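
A test-style sketch of the round-robin policy exercised through the FSVolumeInterface abstraction follows (illustrative only, not part of this patch); the fake volume implements just enough of the interface to drive chooseVolume, and the class names are made up.

// Illustrative sketch only; FakeVolume and RoundRobinExample are hypothetical names.
package org.apache.hadoop.hdfs.server.datanode;

import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;

class RoundRobinExample {
  /** A trivial in-memory volume that only reports a fixed amount of free space. */
  private static class FakeVolume implements FSVolumeInterface {
    private final long available;
    FakeVolume(long available) { this.available = available; }
    @Override public long getAvailable() { return available; }
    @Override public String[] getBlockPoolList() { return new String[0]; }
    @Override public File getDirectory(String bpid) { return null; }
    @Override public File getFinalizedDir(String bpid) { return null; }
  }

  public static void main(String[] args) throws IOException {
    final List<FSVolumeInterface> volumes = Arrays.<FSVolumeInterface>asList(
        new FakeVolume(100L), new FakeVolume(200L));
    final BlockVolumeChoosingPolicy policy = new RoundRobinVolumesPolicy();
    // Both volumes can hold a 50-byte block, so successive calls alternate.
    System.out.println(policy.chooseVolume(volumes, 50) ==
        policy.chooseVolume(volumes, 50)); // prints false
  }
}
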
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index 2aa93ee036f..99cc1ad0d83 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -17,12 +17,14 @@
*/
package org.apache.hadoop.hdfs.server.datanode;
+import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
import java.util.Random;
@@ -38,11 +40,10 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.BlockPoolSlice;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolumeSet;
import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
-import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -988,8 +989,33 @@ public class SimulatedFSDataset implements FSDatasetInterface, Configurable{
}
@Override
- public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b)
- throws IOException {
- throw new IOException("getBlockLocalPathInfo not supported.");
+ public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public String[] getBlockPoolList() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void checkAndUpdate(String bpid, long blockId, File diskFile,
+ File diskMetaFile, FSVolumeInterface vol) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public List<FSVolumeInterface> getVolumes() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public List getFinalizedBlocks(String bpid) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Map<String, Object> getVolumeInfoMap() {
+ throw new UnsupportedOperationException();
}
}
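
A hypothetical JUnit-style helper, not included in this patch, that spells out the convention used above: SimulatedFSDataset answers the newly added FSDatasetInterface methods it cannot simulate with UnsupportedOperationException instead of a checked IOException. The helper name is invented; getVolumes() stands in for any of the stubbed methods.

package org.apache.hadoop.hdfs.server.datanode;

import org.junit.Assert;

class SimulatedFSDatasetSketch {
  // Asserts that a simulated dataset rejects a volume-level call it does not implement.
  static void assertVolumesUnsupported(SimulatedFSDataset dataset) {
    try {
      dataset.getVolumes();
      Assert.fail("expected UnsupportedOperationException");
    } catch (UnsupportedOperationException expected) {
      // deliberately unimplemented in the simulated dataset
    }
  }
}
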
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
index 7b26f4e805a..4a84ce87aba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
@@ -23,7 +23,7 @@ import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame;
import java.io.IOException;
-import java.util.Collection;
+import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -31,7 +31,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.VolumeInfo;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.junit.Assert;
@@ -81,11 +80,11 @@ public class TestDataNodeMultipleRegistrations {
// check number of volumes in fsdataset
DataNode dn = cluster.getDataNodes().get(0);
- Collection<VolumeInfo> volInfos = ((FSDataset) dn.data).getVolumeInfo();
- assertNotNull("No volumes in the fsdataset", volInfos);
+ final Map<String, Object> volInfos = dn.data.getVolumeInfoMap();
+ Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
int i = 0;
- for (VolumeInfo vi : volInfos) {
- LOG.info("vol " + i++ + ";dir=" + vi.directory + ";fs= " + vi.freeSpace);
+ for (Map.Entry<String, Object> e : volInfos.entrySet()) {
+ LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
}
// number of volumes should be 2 - [data1, data2]
assertEquals("number of volumes is wrong", 2, volInfos.size());
@@ -143,11 +142,11 @@ public class TestDataNodeMultipleRegistrations {
// check number of volumes in fsdataset
DataNode dn = cluster.getDataNodes().get(0);
- Collection<VolumeInfo> volInfos = ((FSDataset) dn.data).getVolumeInfo();
- assertNotNull("No volumes in the fsdataset", volInfos);
+ final Map<String, Object> volInfos = dn.data.getVolumeInfoMap();
+ Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
int i = 0;
- for (VolumeInfo vi : volInfos) {
- LOG.info("vol " + i++ + ";dir=" + vi.directory + ";fs= " + vi.freeSpace);
+ for (Map.Entry<String, Object> e : volInfos.entrySet()) {
+ LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
}
// number of volumes should be 2 - [data1, data2]
assertEquals("number of volumes is wrong", 2, volInfos.size());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 86d63a36d77..1ebee2f89ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -17,6 +17,9 @@
*/
package org.apache.hadoop.hdfs.server.datanode;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
@@ -29,8 +32,8 @@ import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.BlockReaderFactory;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -43,13 +46,10 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.net.NetUtils;
-
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
-import static org.junit.Assert.*;
/**
* Fine-grain testing of block files and locations after volume failure.
@@ -274,8 +274,7 @@ public class TestDataNodeVolumeFailure {
String file = BlockReaderFactory.getFileName(targetAddr,
"test-blockpoolid",
block.getBlockId());
- BlockReader blockReader =
- BlockReaderFactory.newBlockReader(conf, s, file, block, lblock
+ BlockReaderFactory.newBlockReader(conf, s, file, block, lblock
.getBlockToken(), 0, -1);
// nothing - if it fails - it will throw an exception
@@ -372,7 +371,7 @@ public class TestDataNodeVolumeFailure {
new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.startsWith("blk_") &&
- name.endsWith(FSDataset.METADATA_EXTENSION);
+ name.endsWith(DatanodeUtil.METADATA_EXTENSION);
}
}
);
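
For reference, a hypothetical standalone filter (not in this patch) showing the same pattern as the anonymous FilenameFilter above, built on the METADATA_EXTENSION constant that this change relocates from FSDataset to DatanodeUtil; the class name is invented.

package org.apache.hadoop.hdfs.server.datanode;

import java.io.File;
import java.io.FilenameFilter;

class BlockMetaFileFilterSketch {
  // Accepts block metadata files, i.e. names starting with blk_ and ending in the
  // metadata extension (".meta").
  static final FilenameFilter META_FILTER = new FilenameFilter() {
    @Override
    public boolean accept(File dir, String name) {
      return name.startsWith("blk_")
          && name.endsWith(DatanodeUtil.METADATA_EXTENSION);
    }
  };
}
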
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java
index e6bd1ea8b11..9737a251d32 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java
@@ -30,17 +30,17 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-
-import org.junit.Test;
import org.junit.Assert;
+import org.junit.Test;
/** Test if a datanode can correctly upgrade itself */
public class TestDatanodeRestart {
@@ -98,8 +98,9 @@ public class TestDatanodeRestart {
out.write(writeBuf);
out.hflush();
DataNode dn = cluster.getDataNodes().get(0);
- for (FSVolume volume : ((FSDataset)dn.data).volumes.getVolumes()) {
- File currentDir = volume.getDir().getParentFile();
+ for (FSVolumeInterface v : dn.data.getVolumes()) {
+ FSVolume volume = (FSVolume)v;
+ File currentDir = volume.getCurrentDir().getParentFile().getParentFile();
File rbwDir = new File(currentDir, "rbw");
for (File file : rbwDir.listFiles()) {
if (isCorrupt && Block.isBlockFilename(file)) {
@@ -188,7 +189,7 @@ public class TestDatanodeRestart {
} else {
src = replicaInfo.getMetaFile();
}
- File dst = FSDataset.getUnlinkTmpFile(src);
+ File dst = DatanodeUtil.getUnlinkTmpFile(src);
if (isRename) {
src.renameTo(dst);
} else {
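
A minimal sketch, not part of the patch, of the relocated unlink-tmp helper used above; the block file path in the example is made up.

package org.apache.hadoop.hdfs.server.datanode;

import java.io.File;

class UnlinkTmpSketch {
  public static void main(String[] args) {
    // Derive the temporary "unlink" file name for a (made-up) block file, using the
    // helper that this change moves from FSDataset to DatanodeUtil.
    File src = new File("/data/dfs/data/current/blk_1001");
    File dst = DatanodeUtil.getUnlinkTmpFile(src);
    System.out.println(src + " -> " + dst);
  }
}
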
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index 208a16ad1b9..1b0c158740f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -25,20 +25,20 @@ import java.util.LinkedList;
import java.util.List;
import java.util.Random;
+import junit.framework.TestCase;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
-
-import junit.framework.TestCase;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
/**
* Tests {@link DirectoryScanner} handling of differences
@@ -142,10 +142,10 @@ public class TestDirectoryScanner extends TestCase {
/** Create a block file in a random volume*/
private long createBlockFile() throws IOException {
- List<FSVolume> volumes = fds.volumes.getVolumes();
+ List<FSVolumeInterface> volumes = fds.getVolumes();
int index = rand.nextInt(volumes.size() - 1);
long id = getFreeBlockId();
- File finalizedDir = volumes.get(index).getBlockPoolSlice(bpid).getFinalizedDir();
+ File finalizedDir = volumes.get(index).getFinalizedDir(bpid);
File file = new File(finalizedDir, getBlockFile(id));
if (file.createNewFile()) {
LOG.info("Created block file " + file.getName());
@@ -155,10 +155,10 @@ public class TestDirectoryScanner extends TestCase {
/** Create a metafile in a random volume*/
private long createMetaFile() throws IOException {
- List<FSVolume> volumes = fds.volumes.getVolumes();
+ List<FSVolumeInterface> volumes = fds.getVolumes();
int index = rand.nextInt(volumes.size() - 1);
long id = getFreeBlockId();
- File finalizedDir = volumes.get(index).getBlockPoolSlice(bpid).getFinalizedDir();
+ File finalizedDir = volumes.get(index).getFinalizedDir(bpid);
File file = new File(finalizedDir, getMetaFile(id));
if (file.createNewFile()) {
LOG.info("Created metafile " + file.getName());
@@ -168,10 +168,10 @@ public class TestDirectoryScanner extends TestCase {
/** Create block file and corresponding metafile in a random volume */
private long createBlockMetaFile() throws IOException {
- List<FSVolume> volumes = fds.volumes.getVolumes();
+ List<FSVolumeInterface> volumes = fds.getVolumes();
int index = rand.nextInt(volumes.size() - 1);
long id = getFreeBlockId();
- File finalizedDir = volumes.get(index).getBlockPoolSlice(bpid).getFinalizedDir();
+ File finalizedDir = volumes.get(index).getFinalizedDir(bpid);
File file = new File(finalizedDir, getBlockFile(id));
if (file.createNewFile()) {
LOG.info("Created block file " + file.getName());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java
index 73d5900fee5..73937efbc39 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java
@@ -21,10 +21,10 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
-import junit.framework.Assert;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
-import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
@@ -33,14 +33,14 @@ public class TestRoundRobinVolumesPolicy {
// Test the Round-Robin block-volume choosing algorithm.
@Test
public void testRR() throws Exception {
- final List<FSVolume> volumes = new ArrayList<FSVolume>();
+ final List<FSVolumeInterface> volumes = new ArrayList<FSVolumeInterface>();
// First volume, with 100 bytes of space.
- volumes.add(Mockito.mock(FSVolume.class));
+ volumes.add(Mockito.mock(FSVolumeInterface.class));
Mockito.when(volumes.get(0).getAvailable()).thenReturn(100L);
// Second volume, with 200 bytes of space.
- volumes.add(Mockito.mock(FSVolume.class));
+ volumes.add(Mockito.mock(FSVolumeInterface.class));
Mockito.when(volumes.get(1).getAvailable()).thenReturn(200L);
RoundRobinVolumesPolicy policy = ReflectionUtils.newInstance(
@@ -69,14 +69,14 @@ public class TestRoundRobinVolumesPolicy {
@Test
public void testRRPolicyExceptionMessage()
throws Exception {
- final List<FSVolume> volumes = new ArrayList<FSVolume>();
+ final List<FSVolumeInterface> volumes = new ArrayList<FSVolumeInterface>();
// First volume, with 500 bytes of space.
- volumes.add(Mockito.mock(FSVolume.class));
+ volumes.add(Mockito.mock(FSVolumeInterface.class));
Mockito.when(volumes.get(0).getAvailable()).thenReturn(500L);
// Second volume, with 600 bytes of space.
- volumes.add(Mockito.mock(FSVolume.class));
+ volumes.add(Mockito.mock(FSVolumeInterface.class));
Mockito.when(volumes.get(1).getAvailable()).thenReturn(600L);
RoundRobinVolumesPolicy policy = new RoundRobinVolumesPolicy();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java
index b83abf1945e..4e5bd5dbee8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java
@@ -140,7 +140,7 @@ public class TestWriteToReplica {
ReplicasMap replicasMap = dataSet.volumeMap;
FSVolume vol = dataSet.volumes.getNextVolume(0);
ReplicaInfo replicaInfo = new FinalizedReplica(
- blocks[FINALIZED].getLocalBlock(), vol, vol.getDir());
+ blocks[FINALIZED].getLocalBlock(), vol, vol.getCurrentDir().getParentFile());
replicasMap.add(bpid, replicaInfo);
replicaInfo.getBlockFile().createNewFile();
replicaInfo.getMetaFile().createNewFile();
@@ -160,15 +160,15 @@ public class TestWriteToReplica {
blocks[RWR].getLocalBlock(), vol, vol.createRbwFile(bpid,
blocks[RWR].getLocalBlock()).getParentFile()));
replicasMap.add(bpid, new ReplicaUnderRecovery(new FinalizedReplica(blocks[RUR]
- .getLocalBlock(), vol, vol.getDir()), 2007));
+ .getLocalBlock(), vol, vol.getCurrentDir().getParentFile()), 2007));
return blocks;
}
private void testAppend(String bpid, FSDataset dataSet, ExtendedBlock[] blocks) throws IOException {
long newGS = blocks[FINALIZED].getGenerationStamp()+1;
- FSVolume v = dataSet.volumeMap.get(bpid, blocks[FINALIZED].getLocalBlock())
- .getVolume();
+ final FSVolume v = (FSVolume)dataSet.volumeMap.get(
+ bpid, blocks[FINALIZED].getLocalBlock()).getVolume();
long available = v.getCapacity()-v.getDfsUsed();
long expectedLen = blocks[FINALIZED].getNumBytes();
try {