From 9e31bf675dd92183a9a74a66b7caf1a080581d65 Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Thu, 1 Mar 2012 21:58:39 +0000
Subject: [PATCH] HDFS-3021. Use generic type to declare FSDatasetInterface.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1295929 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +
 .../datanode/BlockPoolSliceScanner.java | 7 +-
 .../datanode/BlockVolumeChoosingPolicy.java | 7 +-
 .../server/datanode/DataBlockScanner.java | 7 +-
 .../hadoop/hdfs/server/datanode/DataNode.java | 10 +--
 .../server/datanode/DirectoryScanner.java | 8 +-
 .../hdfs/server/datanode/FSDataset.java | 87 +++++++++----------
 .../server/datanode/FSDatasetInterface.java | 14 +--
 .../datanode/RoundRobinVolumesPolicy.java | 9 +-
 .../apache/hadoop/hdfs/MiniDFSCluster.java | 10 ++-
 .../org/apache/hadoop/hdfs/TestDFSRemove.java | 3 +-
 .../apache/hadoop/hdfs/TestFileCreation.java | 7 +-
 .../server/datanode/DataNodeTestUtils.java | 11 +++
 .../server/datanode/SimulatedFSDataset.java | 7 +-
 .../hdfs/server/datanode/TestBlockReport.java | 14 +--
 .../datanode/TestDataNodeVolumeFailure.java | 7 +-
 .../TestDataNodeVolumeFailureToleration.java | 15 +---
 .../server/datanode/TestDirectoryScanner.java | 11 +--
 .../datanode/TestRoundRobinVolumesPolicy.java | 9 +-
 .../datanode/TestSimulatedFSDataset.java | 25 +++---
 20 files changed, 144 insertions(+), 126 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f748a853ed2..f50be1cf88e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -162,6 +162,8 @@ Release 0.23.3 - UNRELEASED
     HDFS-2899. Service protocol changes in DatanodeProtocol to add multiple
     storages. (suresh)
+    HDFS-3021. Use generic type to declare FSDatasetInterface. (szetszwo)
+
   IMPROVEMENTS
     HDFS-2018. Move all journal stream management code into one place.

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
index 54c1b6f3952..50e45750c2b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
@@ -74,7 +74,7 @@ class BlockPoolSliceScanner {
   private long scanPeriod = DEFAULT_SCAN_PERIOD_HOURS * 3600 * 1000;
   private DataNode datanode;
-  private final FSDatasetInterface dataset;
+  private final FSDatasetInterface dataset;
   // sorted set
   private TreeSet blockInfoSet;
@@ -133,7 +133,8 @@ class BlockPoolSliceScanner {
     }
   }
-  BlockPoolSliceScanner(DataNode datanode, FSDatasetInterface dataset,
+  BlockPoolSliceScanner(DataNode datanode,
+      FSDatasetInterface dataset,
       Configuration conf, String bpid) {
     this.datanode = datanode;
     this.dataset = dataset;
@@ -216,7 +217,7 @@ class BlockPoolSliceScanner {
    * otherwise, pick the first directory.
*/ File dir = null; - List volumes = dataset.getVolumes(); + final List volumes = dataset.getVolumes(); for (FSVolumeInterface vol : volumes) { File bpDir = vol.getDirectory(blockPoolId); if (LogFileHandler.isFilePresent(bpDir, verificationLogFile)) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java index 31cf30a925d..c96be75f125 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface; /************************************************** @@ -34,7 +33,7 @@ import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterfa * ***************************************************/ @InterfaceAudience.Private -public interface BlockVolumeChoosingPolicy { +public interface BlockVolumeChoosingPolicy { /** * Returns a specific FSVolume after applying a suitable choice algorithm @@ -48,7 +47,5 @@ public interface BlockVolumeChoosingPolicy { * @return the chosen volume to store the block. * @throws IOException when disks are unavailable or are full. */ - public FSVolumeInterface chooseVolume(List volumes, long blockSize) - throws IOException; - + public V chooseVolume(List volumes, long blockSize) throws IOException; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java index c0d0bff23c2..9f772bc60bd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java @@ -33,6 +33,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface; /** * DataBlockScanner manages block scanning for all the block pools. 
For each @@ -44,7 +45,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock; public class DataBlockScanner implements Runnable { public static final Log LOG = LogFactory.getLog(DataBlockScanner.class); private final DataNode datanode; - private final FSDatasetInterface dataset; + private final FSDatasetInterface dataset; private final Configuration conf; /** @@ -55,7 +56,9 @@ public class DataBlockScanner implements Runnable { new TreeMap(); Thread blockScannerThread = null; - DataBlockScanner(DataNode datanode, FSDatasetInterface dataset, Configuration conf) { + DataBlockScanner(DataNode datanode, + FSDatasetInterface dataset, + Configuration conf) { this.datanode = datanode; this.dataset = dataset; this.conf = conf; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 86baa1a9526..56815257245 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -123,6 +123,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.JspHelper; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.common.Util; +import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface; import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources; import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; import org.apache.hadoop.hdfs.server.datanode.web.resources.DatanodeWebHdfsMethods; @@ -139,7 +140,6 @@ import org.apache.hadoop.hdfs.web.resources.Param; import org.apache.hadoop.http.HttpServer; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.ProtocolSignature; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.Server; @@ -369,7 +369,7 @@ public class DataNode extends Configured volatile boolean shouldRun = true; private BlockPoolManager blockPoolManager; - public volatile FSDatasetInterface data = null; + public volatile FSDatasetInterface data = null; private String clusterId = null; public final static String EMPTY_DEL_HINT = ""; @@ -887,7 +887,7 @@ public class DataNode extends Configured * handshake with the the first namenode is completed. */ private void initStorage(final NamespaceInfo nsInfo) throws IOException { - final FSDatasetInterface.Factory factory + final FSDatasetInterface.Factory> factory = FSDatasetInterface.Factory.getFactory(conf); if (!factory.isSimulated()) { @@ -1782,11 +1782,11 @@ public class DataNode extends Configured /** * This method is used for testing. * Examples are adding and deleting blocks directly. - * The most common usage will be when the data node's storage is similated. + * The most common usage will be when the data node's storage is simulated. 
* * @return the fsdataset that stores the blocks */ - public FSDatasetInterface getFSDataset() { + FSDatasetInterface getFSDataset() { return data; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java index 40b51a28b2b..97ff5a8416e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java @@ -55,7 +55,7 @@ public class DirectoryScanner implements Runnable { private static final Log LOG = LogFactory.getLog(DirectoryScanner.class); private final DataNode datanode; - private final FSDatasetInterface dataset; + private final FSDatasetInterface dataset; private final ExecutorService reportCompileThreadPool; private final ScheduledExecutorService masterThread; private final long scanPeriodMsecs; @@ -219,7 +219,7 @@ public class DirectoryScanner implements Runnable { } } - DirectoryScanner(DataNode dn, FSDatasetInterface dataset, Configuration conf) { + DirectoryScanner(DataNode dn, FSDatasetInterface dataset, Configuration conf) { this.datanode = dn; this.dataset = dataset; int interval = conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, @@ -411,7 +411,7 @@ public class DirectoryScanner implements Runnable { } /** Is the given volume still valid in the dataset? */ - private static boolean isValid(final FSDatasetInterface dataset, + private static boolean isValid(final FSDatasetInterface dataset, final FSVolumeInterface volume) { for (FSVolumeInterface vol : dataset.getVolumes()) { if (vol == volume) { @@ -424,7 +424,7 @@ public class DirectoryScanner implements Runnable { /** Get lists of blocks on the disk sorted by blockId, per blockpool */ private Map getDiskReport() { // First get list of data directories - final List volumes = dataset.getVolumes(); + final List volumes = dataset.getVolumes(); ArrayList dirReports = new ArrayList(volumes.size()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java index b1540fe5a82..457be4158e0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException; import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface; import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; @@ -74,13 +75,13 @@ import org.apache.hadoop.util.ReflectionUtils; * ***************************************************/ @InterfaceAudience.Private -class FSDataset implements FSDatasetInterface { +class FSDataset implements FSDatasetInterface { /** * A factory for creating FSDataset objects. 
*/ - static class Factory extends FSDatasetInterface.Factory { + static class Factory extends FSDatasetInterface.Factory { @Override - public FSDatasetInterface createFSDatasetInterface(DataNode datanode, + public FSDataset createFSDatasetInterface(DataNode datanode, DataStorage storage, Configuration conf) throws IOException { return new FSDataset(datanode, storage, conf); } @@ -786,13 +787,13 @@ class FSDataset implements FSDatasetInterface { * Read access to this unmodifiable list is not synchronized. * This list is replaced on modification holding "this" lock. */ - private volatile List volumes = null; + private volatile List volumes = null; - BlockVolumeChoosingPolicy blockChooser; + BlockVolumeChoosingPolicy blockChooser; int numFailedVolumes; - FSVolumeSet(List volumes, int failedVols, - BlockVolumeChoosingPolicy blockChooser) { + FSVolumeSet(List volumes, int failedVols, + BlockVolumeChoosingPolicy blockChooser) { this.volumes = Collections.unmodifiableList(volumes); this.blockChooser = blockChooser; this.numFailedVolumes = failedVols; @@ -810,29 +811,29 @@ class FSDataset implements FSDatasetInterface { * @return next volume to store the block in. */ synchronized FSVolume getNextVolume(long blockSize) throws IOException { - return (FSVolume)blockChooser.chooseVolume(volumes, blockSize); + return blockChooser.chooseVolume(volumes, blockSize); } private long getDfsUsed() throws IOException { long dfsUsed = 0L; - for (FSVolumeInterface v : volumes) { - dfsUsed += ((FSVolume)v).getDfsUsed(); + for (FSVolume v : volumes) { + dfsUsed += v.getDfsUsed(); } return dfsUsed; } private long getBlockPoolUsed(String bpid) throws IOException { long dfsUsed = 0L; - for (FSVolumeInterface v : volumes) { - dfsUsed += ((FSVolume)v).getBlockPoolUsed(bpid); + for (FSVolume v : volumes) { + dfsUsed += v.getBlockPoolUsed(bpid); } return dfsUsed; } private long getCapacity() { long capacity = 0L; - for (FSVolumeInterface v : volumes) { - capacity += ((FSVolume)v).getCapacity(); + for (FSVolume v : volumes) { + capacity += v.getCapacity(); } return capacity; } @@ -845,17 +846,16 @@ class FSDataset implements FSDatasetInterface { return remaining; } - private void getVolumeMap(ReplicasMap volumeMap) - throws IOException { - for (FSVolumeInterface v : volumes) { - ((FSVolume)v).getVolumeMap(volumeMap); + private void getVolumeMap(ReplicasMap volumeMap) throws IOException { + for (FSVolume v : volumes) { + v.getVolumeMap(volumeMap); } } private void getVolumeMap(String bpid, ReplicasMap volumeMap) throws IOException { - for (FSVolumeInterface v : volumes) { - ((FSVolume)v).getVolumeMap(bpid, volumeMap); + for (FSVolume v : volumes) { + v.getVolumeMap(bpid, volumeMap); } } @@ -871,10 +871,10 @@ class FSDataset implements FSDatasetInterface { ArrayList removedVols = null; // Make a copy of volumes for performing modification - final List volumeList = new ArrayList(volumes); + final List volumeList = new ArrayList(volumes); for (int idx = 0; idx < volumeList.size(); idx++) { - FSVolume fsv = (FSVolume)volumeList.get(idx); + FSVolume fsv = volumeList.get(idx); try { fsv.checkDirs(); } catch (DiskErrorException e) { @@ -891,8 +891,8 @@ class FSDataset implements FSDatasetInterface { // Remove null volumes from the volumes array if (removedVols != null && removedVols.size() > 0) { - List newVols = new ArrayList(); - for (FSVolumeInterface vol : volumeList) { + final List newVols = new ArrayList(); + for (FSVolume vol : volumeList) { if (vol != null) { newVols.add(vol); } @@ -914,21 +914,21 @@ class FSDataset 
implements FSDatasetInterface { private void addBlockPool(String bpid, Configuration conf) throws IOException { - for (FSVolumeInterface v : volumes) { - ((FSVolume)v).addBlockPool(bpid, conf); + for (FSVolume v : volumes) { + v.addBlockPool(bpid, conf); } } private void removeBlockPool(String bpid) { - for (FSVolumeInterface v : volumes) { - ((FSVolume)v).shutdownBlockPool(bpid); + for (FSVolume v : volumes) { + v.shutdownBlockPool(bpid); } } private void shutdown() { - for (FSVolumeInterface volume : volumes) { + for (FSVolume volume : volumes) { if(volume != null) { - ((FSVolume)volume).shutdown(); + volume.shutdown(); } } } @@ -991,7 +991,7 @@ class FSDataset implements FSDatasetInterface { } @Override // FSDatasetInterface - public List getVolumes() { + public List getVolumes() { return volumes.volumes; } @@ -1099,7 +1099,7 @@ class FSDataset implements FSDatasetInterface { + ", volume failures tolerated: " + volFailuresTolerated); } - final List volArray = new ArrayList( + final List volArray = new ArrayList( storage.getNumStorageDirs()); for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) { final File dir = storage.getStorageDir(idx).getCurrentDir(); @@ -1108,12 +1108,12 @@ class FSDataset implements FSDatasetInterface { } volumeMap = new ReplicasMap(this); - BlockVolumeChoosingPolicy blockChooserImpl = - (BlockVolumeChoosingPolicy) ReflectionUtils.newInstance( - conf.getClass(DFSConfigKeys.DFS_DATANODE_BLOCKVOLUMECHOICEPOLICY, + @SuppressWarnings("unchecked") + final BlockVolumeChoosingPolicy blockChooserImpl = + ReflectionUtils.newInstance(conf.getClass( + DFSConfigKeys.DFS_DATANODE_BLOCKVOLUMECHOICEPOLICY, RoundRobinVolumesPolicy.class, - BlockVolumeChoosingPolicy.class), - conf); + BlockVolumeChoosingPolicy.class), conf); volumes = new FSVolumeSet(volArray, volsFailed, blockChooserImpl); volumes.getVolumeMap(volumeMap); @@ -2001,7 +2001,7 @@ class FSDataset implements FSDatasetInterface { boolean error = false; for (int i = 0; i < invalidBlks.length; i++) { File f = null; - FSVolume v; + final FSVolume v; synchronized (this) { f = getFile(bpid, invalidBlks[i].getBlockId()); ReplicaInfo dinfo = volumeMap.get(bpid, invalidBlks[i]); @@ -2553,8 +2553,7 @@ class FSDataset implements FSDatasetInterface { private Collection getVolumeInfo() { Collection info = new ArrayList(); - for (FSVolumeInterface v : volumes.volumes) { - final FSVolume volume = (FSVolume)v; + for (FSVolume volume : volumes.volumes) { long used = 0; long free = 0; try { @@ -2590,8 +2589,8 @@ class FSDataset implements FSDatasetInterface { public synchronized void deleteBlockPool(String bpid, boolean force) throws IOException { if (!force) { - for (FSVolumeInterface volume : volumes.volumes) { - if (!((FSVolume)volume).isBPDirEmpty(bpid)) { + for (FSVolume volume : volumes.volumes) { + if (!volume.isBPDirEmpty(bpid)) { DataNode.LOG.warn(bpid + " has some block files, cannot delete unless forced"); throw new IOException("Cannot delete block pool, " @@ -2599,8 +2598,8 @@ class FSDataset implements FSDatasetInterface { } } } - for (FSVolumeInterface volume : volumes.volumes) { - ((FSVolume)volume).deleteBPDirectories(bpid, force); + for (FSVolume volume : volumes.volumes) { + volume.deleteBPDirectories(bpid, force); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java index 2487ca6ed9b..6e2fb201c84 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java @@ -50,13 +50,15 @@ import org.apache.hadoop.util.DiskChecker.DiskErrorException; * */ @InterfaceAudience.Private -public interface FSDatasetInterface extends FSDatasetMBean { +public interface FSDatasetInterface + extends FSDatasetMBean { /** * A factory for creating FSDatasetInterface objects. */ - public abstract class Factory { + public abstract class Factory> { /** @return the configured factory. */ - public static Factory getFactory(Configuration conf) { + public static Factory getFactory(Configuration conf) { + @SuppressWarnings("rawtypes") final Class clazz = conf.getClass( DFSConfigKeys.DFS_DATANODE_FSDATASET_FACTORY_KEY, FSDataset.Factory.class, @@ -65,7 +67,7 @@ public interface FSDatasetInterface extends FSDatasetMBean { } /** Create a FSDatasetInterface object. */ - public abstract FSDatasetInterface createFSDatasetInterface( + public abstract D createFSDatasetInterface( DataNode datanode, DataStorage storage, Configuration conf ) throws IOException; @@ -94,7 +96,7 @@ public interface FSDatasetInterface extends FSDatasetMBean { } /** @return a list of volumes. */ - public List getVolumes(); + public List getVolumes(); /** @return a volume information map (name => info). */ public Map getVolumeInfoMap(); @@ -234,7 +236,7 @@ public interface FSDatasetInterface extends FSDatasetMBean { this.checksum = checksum; } - void close() throws IOException { + void close() { IOUtils.closeStream(dataOut); IOUtils.closeStream(checksumOut); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java index 1463287268f..00fdffab2f7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java @@ -23,13 +23,14 @@ import java.util.List; import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface; import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; -public class RoundRobinVolumesPolicy implements BlockVolumeChoosingPolicy { +public class RoundRobinVolumesPolicy + implements BlockVolumeChoosingPolicy { private int curVolume = 0; @Override - public synchronized FSVolumeInterface chooseVolume( - List volumes, long blockSize) throws IOException { + public synchronized V chooseVolume(final List volumes, final long blockSize + ) throws IOException { if(volumes.size() < 1) { throw new DiskOutOfSpaceException("No more available volumes"); } @@ -44,7 +45,7 @@ public class RoundRobinVolumesPolicy implements BlockVolumeChoosingPolicy { long maxAvailable = 0; while (true) { - FSVolumeInterface volume = volumes.get(curVolume); + final V volume = volumes.get(curVolume); curVolume = (curVolume + 1) % volumes.size(); long availableVolumeSize = volume.getAvailable(); if (availableVolumeSize > blockSize) { return volume; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index c2052723062..c3cc6bbcabe 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -1565,8 +1565,8 @@ public class MiniDFSCluster { if (dataNodeIndex < 0 || dataNodeIndex > dataNodes.size()) { throw new IndexOutOfBoundsException(); } - return dataNodes.get(dataNodeIndex).datanode.getFSDataset().getBlockReport( - bpid); + final DataNode dn = dataNodes.get(dataNodeIndex).datanode; + return DataNodeTestUtils.getFSDataset(dn).getBlockReport(bpid); } @@ -1598,7 +1598,8 @@ public class MiniDFSCluster { if (dataNodeIndex < 0 || dataNodeIndex > dataNodes.size()) { throw new IndexOutOfBoundsException(); } - FSDatasetInterface dataSet = dataNodes.get(dataNodeIndex).datanode.getFSDataset(); + final DataNode dn = dataNodes.get(dataNodeIndex).datanode; + final FSDatasetInterface dataSet = DataNodeTestUtils.getFSDataset(dn); if (!(dataSet instanceof SimulatedFSDataset)) { throw new IOException("injectBlocks is valid only for SimilatedFSDataset"); } @@ -1616,7 +1617,8 @@ public class MiniDFSCluster { if (dataNodeIndex < 0 || dataNodeIndex > dataNodes.size()) { throw new IndexOutOfBoundsException(); } - FSDatasetInterface dataSet = dataNodes.get(dataNodeIndex).datanode.getFSDataset(); + final DataNode dn = dataNodes.get(dataNodeIndex).datanode; + final FSDatasetInterface dataSet = DataNodeTestUtils.getFSDataset(dn); if (!(dataSet instanceof SimulatedFSDataset)) { throw new IOException("injectBlocks is valid only for SimilatedFSDataset"); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java index 8a8d404658e..1b23c5f319d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java @@ -25,6 +25,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; public class TestDFSRemove extends junit.framework.TestCase { final Path dir = new Path("/test/remove/"); @@ -45,7 +46,7 @@ public class TestDFSRemove extends junit.framework.TestCase { static long getTotalDfsUsed(MiniDFSCluster cluster) throws IOException { long total = 0; for(DataNode node : cluster.getDataNodes()) { - total += node.getFSDataset().getDfsUsed(); + total += DataNodeTestUtils.getFSDataset(node).getDfsUsed(); } return total; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java index 89b65f5e057..bc43f845bdd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java @@ -60,6 +60,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; +import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import 
org.apache.hadoop.hdfs.server.namenode.LeaseManager; @@ -210,8 +211,10 @@ public class TestFileCreation extends junit.framework.TestCase { // can't check capacities for real storage since the OS file system may be changing under us. if (simulatedStorage) { DataNode dn = cluster.getDataNodes().get(0); - assertEquals(fileSize, dn.getFSDataset().getDfsUsed()); - assertEquals(SimulatedFSDataset.DEFAULT_CAPACITY-fileSize, dn.getFSDataset().getRemaining()); + FSDatasetInterface dataset = DataNodeTestUtils.getFSDataset(dn); + assertEquals(fileSize, dataset.getDfsUsed()); + assertEquals(SimulatedFSDataset.DEFAULT_CAPACITY-fileSize, + dataset.getRemaining()); } } finally { cluster.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java index 26acd0560ac..c9be8f9524c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java @@ -41,6 +41,17 @@ public class DataNodeTestUtils { return dn.getDNRegistrationForBP(bpid); } + /** + * This method is used for testing. + * Examples are adding and deleting blocks directly. + * The most common usage will be when the data node's storage is simulated. + * + * @return the fsdataset that stores the blocks + */ + public static FSDatasetInterface getFSDataset(DataNode dn) { + return dn.getFSDataset(); + } + public static File getFile(DataNode dn, String bpid, long bid) { return ((FSDataset)dn.getFSDataset()).getFile(bpid, bid); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java index 04a93b7bb2e..bd873440081 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java @@ -61,10 +61,11 @@ import org.apache.hadoop.util.DiskChecker.DiskErrorException; * * Note the synchronization is coarse grained - it is at each method. 
*/ -public class SimulatedFSDataset implements FSDatasetInterface { - static class Factory extends FSDatasetInterface.Factory { +public class SimulatedFSDataset + implements FSDatasetInterface { + static class Factory extends FSDatasetInterface.Factory { @Override - public FSDatasetInterface createFSDatasetInterface(DataNode datanode, + public SimulatedFSDataset createFSDatasetInterface(DataNode datanode, DataStorage storage, Configuration conf) throws IOException { return new SimulatedFSDataset(datanode, storage, conf); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java index a12cb334801..f8318762757 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java @@ -210,13 +210,14 @@ public class TestBlockReport { LOG.debug("Number of blocks allocated " + lBlocks.size()); } + final DataNode dn0 = cluster.getDataNodes().get(DN_N0); for (ExtendedBlock b : blocks2Remove) { if(LOG.isDebugEnabled()) { LOG.debug("Removing the block " + b.getBlockName()); } for (File f : findAllFiles(dataDir, new MyFileFilter(b.getBlockName(), true))) { - cluster.getDataNodes().get(DN_N0).getFSDataset().unfinalizeBlock(b); + DataNodeTestUtils.getFSDataset(dn0).unfinalizeBlock(b); if (!f.delete()) LOG.warn("Couldn't delete " + b.getBlockName()); } @@ -225,9 +226,8 @@ public class TestBlockReport { waitTil(DN_RESCAN_EXTRA_WAIT); // all blocks belong to the same file, hence same BP - DataNode dn = cluster.getDataNodes().get(DN_N0); String poolId = cluster.getNamesystem().getBlockPoolId(); - DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId); + DatanodeRegistration dnR = dn0.getDNRegistrationForBP(poolId); StorageBlockReport[] report = { new StorageBlockReport(dnR.getStorageID(), new BlockListAsLongs(blocks, null).getBlockListAsLongs()) }; cluster.getNameNodeRpc().blockReport(dnR, poolId, report); @@ -602,15 +602,15 @@ public class TestBlockReport { cluster.waitActive(); // Look about specified DN for the replica of the block from 1st DN + final DataNode dn1 = cluster.getDataNodes().get(DN_N1); + final FSDataset dataset1 = (FSDataset)DataNodeTestUtils.getFSDataset(dn1); String bpid = cluster.getNamesystem().getBlockPoolId(); - Replica r = ((FSDataset) cluster.getDataNodes().get(DN_N1).getFSDataset()). - fetchReplicaInfo(bpid, bl.getBlockId()); + Replica r = dataset1.fetchReplicaInfo(bpid, bl.getBlockId()); long start = System.currentTimeMillis(); int count = 0; while (r == null) { waitTil(5); - r = ((FSDataset) cluster.getDataNodes().get(DN_N1).getFSDataset()). 
- fetchReplicaInfo(bpid, bl.getBlockId()); + r = dataset1.fetchReplicaInfo(bpid, bl.getBlockId()); long waiting_period = System.currentTimeMillis() - start; if (count++ % 100 == 0) if(LOG.isDebugEnabled()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java index 1ebee2f89ae..1d9f803e917 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java @@ -145,8 +145,11 @@ public class TestDataNodeVolumeFailure { DataNode dn = cluster.getDataNodes().get(1); //corresponds to dir data3 String bpid = cluster.getNamesystem().getBlockPoolId(); DatanodeRegistration dnR = dn.getDNRegistrationForBP(bpid); - StorageBlockReport[] report = { new StorageBlockReport(dnR.getStorageID(), - dn.getFSDataset().getBlockReport(bpid).getBlockListAsLongs()) }; + final StorageBlockReport[] report = { + new StorageBlockReport(dnR.getStorageID(), + DataNodeTestUtils.getFSDataset(dn).getBlockReport(bpid + ).getBlockListAsLongs()) + }; cluster.getNameNodeRpc().blockReport(dnR, bpid, report); // verify number of blocks and files... diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java index 47bfa703db0..ea256853f98 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java @@ -24,11 +24,7 @@ import static org.junit.Assume.assumeTrue; import java.io.File; import java.io.IOException; -import java.util.concurrent.TimeoutException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; @@ -38,7 +34,6 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; -import org.apache.log4j.Level; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -47,12 +42,6 @@ import org.junit.Test; * Test the ability of a DN to tolerate volume failures. 
*/ public class TestDataNodeVolumeFailureToleration { - - private static final Log LOG = LogFactory.getLog(TestDataNodeVolumeFailureToleration.class); - { - ((Log4JLogger)TestDataNodeVolumeFailureToleration.LOG).getLogger().setLevel(Level.ALL); - } - private FileSystem fs; private MiniDFSCluster cluster; private Configuration conf; @@ -130,7 +119,7 @@ public class TestDataNodeVolumeFailureToleration { assertTrue("The DN should have started up fine.", cluster.isDataNodeUp()); DataNode dn = cluster.getDataNodes().get(0); - String si = dn.getFSDataset().getStorageInfo(); + String si = DataNodeTestUtils.getFSDataset(dn).getStorageInfo(); assertTrue("The DN should have started with this directory", si.contains(dataDir1Actual.getPath())); assertFalse("The DN shouldn't have a bad directory.", @@ -227,7 +216,7 @@ public class TestDataNodeVolumeFailureToleration { */ private void testVolumeConfig(int volumesTolerated, int volumesFailed, boolean expectedBPServiceState, boolean manageDfsDirs) - throws IOException, InterruptedException, TimeoutException { + throws IOException, InterruptedException { assumeTrue(!System.getProperty("os.name").startsWith("Windows")); final int dnIndex = 0; // Fail the current directory since invalid storage directory perms diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java index 1b0c158740f..8707805613c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java @@ -38,7 +38,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.common.GenerationStamp; -import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface; +import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; /** * Tests {@link DirectoryScanner} handling of differences @@ -142,7 +142,7 @@ public class TestDirectoryScanner extends TestCase { /** Create a block file in a random volume*/ private long createBlockFile() throws IOException { - List volumes = fds.getVolumes(); + List volumes = fds.getVolumes(); int index = rand.nextInt(volumes.size() - 1); long id = getFreeBlockId(); File finalizedDir = volumes.get(index).getFinalizedDir(bpid); @@ -155,7 +155,7 @@ public class TestDirectoryScanner extends TestCase { /** Create a metafile in a random volume*/ private long createMetaFile() throws IOException { - List volumes = fds.getVolumes(); + List volumes = fds.getVolumes(); int index = rand.nextInt(volumes.size() - 1); long id = getFreeBlockId(); File finalizedDir = volumes.get(index).getFinalizedDir(bpid); @@ -168,7 +168,7 @@ public class TestDirectoryScanner extends TestCase { /** Create block file and corresponding metafile in a rondom volume */ private long createBlockMetaFile() throws IOException { - List volumes = fds.getVolumes(); + List volumes = fds.getVolumes(); int index = rand.nextInt(volumes.size() - 1); long id = getFreeBlockId(); File finalizedDir = volumes.get(index).getFinalizedDir(bpid); @@ -228,7 +228,8 @@ public class TestDirectoryScanner extends TestCase { try { cluster.waitActive(); bpid = cluster.getNamesystem().getBlockPoolId(); - fds = (FSDataset) 
cluster.getDataNodes().get(0).getFSDataset(); + fds = (FSDataset)DataNodeTestUtils.getFSDataset( + cluster.getDataNodes().get(0)); CONF.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY, parallelism); DataNode dn = cluster.getDataNodes().get(0); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java index 73937efbc39..f401be3af15 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java @@ -43,8 +43,10 @@ public class TestRoundRobinVolumesPolicy { volumes.add(Mockito.mock(FSVolumeInterface.class)); Mockito.when(volumes.get(1).getAvailable()).thenReturn(200L); - RoundRobinVolumesPolicy policy = ReflectionUtils.newInstance( - RoundRobinVolumesPolicy.class, null); + @SuppressWarnings("unchecked") + final RoundRobinVolumesPolicy policy = + (RoundRobinVolumesPolicy)ReflectionUtils.newInstance( + RoundRobinVolumesPolicy.class, null); // Test two rounds of round-robin choosing Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 0)); @@ -79,7 +81,8 @@ public class TestRoundRobinVolumesPolicy { volumes.add(Mockito.mock(FSVolumeInterface.class)); Mockito.when(volumes.get(1).getAvailable()).thenReturn(600L); - RoundRobinVolumesPolicy policy = new RoundRobinVolumesPolicy(); + final RoundRobinVolumesPolicy policy + = new RoundRobinVolumesPolicy(); int blockSize = 700; try { policy.chooseVolume(volumes, blockSize); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java index 6a6c81a6fa2..752419fe288 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java @@ -21,6 +21,7 @@ import java.io.DataInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; + import junit.framework.TestCase; import org.apache.hadoop.conf.Configuration; @@ -28,8 +29,6 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface; -import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams; import org.apache.hadoop.util.DataChecksum; @@ -56,7 +55,7 @@ public class TestSimulatedFSDataset extends TestCase { return blkid*BLOCK_LENGTH_MULTIPLIER; } - int addSomeBlocks(FSDatasetInterface fsdataset, int startingBlockId) + int addSomeBlocks(SimulatedFSDataset fsdataset, int startingBlockId) throws IOException { int bytesAdded = 0; for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) { @@ -83,24 +82,24 @@ public class TestSimulatedFSDataset extends TestCase { } return bytesAdded; } - int addSomeBlocks(FSDatasetInterface fsdataset ) throws IOException { + int addSomeBlocks(SimulatedFSDataset fsdataset ) throws 
IOException { return addSomeBlocks(fsdataset, 1); } public void testFSDatasetFactory() { final Configuration conf = new Configuration(); - FSDatasetInterface.Factory f = FSDatasetInterface.Factory.getFactory(conf); + FSDatasetInterface.Factory f = FSDatasetInterface.Factory.getFactory(conf); assertEquals(FSDataset.Factory.class, f.getClass()); assertFalse(f.isSimulated()); SimulatedFSDataset.setFactory(conf); - FSDatasetInterface.Factory s = FSDatasetInterface.Factory.getFactory(conf); + FSDatasetInterface.Factory s = FSDatasetInterface.Factory.getFactory(conf); assertEquals(SimulatedFSDataset.Factory.class, s.getClass()); assertTrue(s.isSimulated()); } public void testGetMetaData() throws IOException { - FSDatasetInterface fsdataset = getSimulatedFSDataset(); + final SimulatedFSDataset fsdataset = getSimulatedFSDataset(); ExtendedBlock b = new ExtendedBlock(bpid, 1, 5, 0); try { assertFalse(fsdataset.metaFileExists(b)); @@ -121,7 +120,7 @@ public class TestSimulatedFSDataset extends TestCase { public void testStorageUsage() throws IOException { - FSDatasetInterface fsdataset = getSimulatedFSDataset(); + final SimulatedFSDataset fsdataset = getSimulatedFSDataset(); assertEquals(fsdataset.getDfsUsed(), 0); assertEquals(fsdataset.getRemaining(), fsdataset.getCapacity()); int bytesAdded = addSomeBlocks(fsdataset); @@ -131,7 +130,7 @@ public class TestSimulatedFSDataset extends TestCase { - void checkBlockDataAndSize(FSDatasetInterface fsdataset, ExtendedBlock b, + void checkBlockDataAndSize(SimulatedFSDataset fsdataset, ExtendedBlock b, long expectedLen) throws IOException { InputStream input = fsdataset.getBlockInputStream(b); long lengthRead = 0; @@ -144,7 +143,7 @@ public class TestSimulatedFSDataset extends TestCase { } public void testWriteRead() throws IOException { - FSDatasetInterface fsdataset = getSimulatedFSDataset(); + final SimulatedFSDataset fsdataset = getSimulatedFSDataset(); addSomeBlocks(fsdataset); for (int i=1; i <= NUMBLOCKS; ++i) { ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0); @@ -244,7 +243,7 @@ public class TestSimulatedFSDataset extends TestCase { } public void checkInvalidBlock(ExtendedBlock b) throws IOException { - FSDatasetInterface fsdataset = getSimulatedFSDataset(); + final SimulatedFSDataset fsdataset = getSimulatedFSDataset(); assertFalse(fsdataset.isValidBlock(b)); try { fsdataset.getLength(b); @@ -269,7 +268,7 @@ public class TestSimulatedFSDataset extends TestCase { } public void testInValidBlocks() throws IOException { - FSDatasetInterface fsdataset = getSimulatedFSDataset(); + final SimulatedFSDataset fsdataset = getSimulatedFSDataset(); ExtendedBlock b = new ExtendedBlock(bpid, 1, 5, 0); checkInvalidBlock(b); @@ -280,7 +279,7 @@ public class TestSimulatedFSDataset extends TestCase { } public void testInvalidate() throws IOException { - FSDatasetInterface fsdataset = getSimulatedFSDataset(); + final SimulatedFSDataset fsdataset = getSimulatedFSDataset(); int bytesAdded = addSomeBlocks(fsdataset); Block[] deleteBlocks = new Block[2]; deleteBlocks[0] = new Block(1, 0, 0);