From b512e9f2828670dc1dfa6b4f6353423faa6dee34 Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Wed, 22 Feb 2012 18:03:27 +0000 Subject: [PATCH 1/7] Merge -r 1292423:1292424 from trunk to branch. FIXES: MAPREDUCE-3884 git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1292427 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 2 ++ .../java/org/apache/hadoop/mapreduce/v2/util/MRApps.java | 3 +++ .../org/apache/hadoop/mapreduce/v2/util/TestMRApps.java | 6 +++--- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 0f13ea9d494..590998ac647 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -40,6 +40,8 @@ Release 0.23.2 - UNRELEASED MAPREDUCE-3798. Fixed failing TestJobCleanup.testCusomCleanup() and moved it to the maven build. (Ravi Prakash via vinodkv) + + MAPREDUCE-3884. PWD should be first in the classpath of MR tasks (tucu) Release 0.23.1 - 2012-02-17 diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java index 019f1de0308..572ebde5515 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java @@ -230,6 +230,9 @@ public static void setClasspath(Map environment, boolean userClassesTakesPrecedence = conf.getBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST, false); + Apps.addToEnvironment(environment, + Environment.CLASSPATH.name(), + Environment.PWD.$()); if (!userClassesTakesPrecedence) { MRApps.setMRFrameworkClasspath(environment, conf); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java index 77299a05c79..715b6c626d9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java @@ -130,7 +130,7 @@ public class TestMRApps { Job job = Job.getInstance(); Map environment = new HashMap(); MRApps.setClasspath(environment, job.getConfiguration()); - assertEquals("$HADOOP_CONF_DIR:" + + assertEquals("$PWD:$HADOOP_CONF_DIR:" + "$HADOOP_COMMON_HOME/share/hadoop/common/*:" + "$HADOOP_COMMON_HOME/share/hadoop/common/lib/*:" + "$HADOOP_HDFS_HOME/share/hadoop/hdfs/*:" + @@ -152,7 +152,7 @@ public class TestMRApps { } String env_str = env.get("CLASSPATH"); assertSame("MAPREDUCE_JOB_USER_CLASSPATH_FIRST set, but not taking effect!", - env_str.indexOf("job.jar"), 0); + env_str.indexOf("$PWD:job.jar"), 0); } @Test public void testSetClasspathWithNoUserPrecendence() { @@ -166,7 +166,7 @@ public class TestMRApps { } String env_str = env.get("CLASSPATH"); assertNotSame("MAPREDUCE_JOB_USER_CLASSPATH_FIRST false, but taking 
effect!", - env_str.indexOf("job.jar"), 0); + env_str.indexOf("$PWD:job.jar"), 0); } } From 05ea57177bb0b6e02703f0204937e77c4e864db3 Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Wed, 22 Feb 2012 18:03:43 +0000 Subject: [PATCH 2/7] svn merge -c 1292419 from trunk for HDFS-2907. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1292428 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 6 +- .../apache/hadoop/hdfs/HdfsConfiguration.java | 1 - .../hadoop/hdfs/server/datanode/DataNode.java | 100 ++++++------------ .../hdfs/server/datanode/FSDataset.java | 14 ++- .../server/datanode/FSDatasetInterface.java | 26 +++++ .../apache/hadoop/hdfs/DataNodeCluster.java | 8 +- .../apache/hadoop/hdfs/MiniDFSCluster.java | 2 +- .../apache/hadoop/hdfs/TestFileAppend.java | 8 +- .../apache/hadoop/hdfs/TestFileAppend2.java | 2 +- .../apache/hadoop/hdfs/TestFileAppend4.java | 2 +- .../apache/hadoop/hdfs/TestFileCreation.java | 14 +-- .../TestInjectionForSimulatedStorage.java | 4 +- .../apache/hadoop/hdfs/TestLargeBlock.java | 2 +- .../org/apache/hadoop/hdfs/TestPread.java | 2 +- .../apache/hadoop/hdfs/TestReplication.java | 2 +- .../hadoop/hdfs/TestSetrepIncreasing.java | 2 +- .../hdfs/TestShortCircuitLocalRead.java | 4 +- .../apache/hadoop/hdfs/TestSmallBlock.java | 2 +- .../hdfs/server/balancer/TestBalancer.java | 2 +- .../server/datanode/SimulatedFSDataset.java | 80 +++++++------- .../server/datanode/TestDataNodeMetrics.java | 2 +- .../datanode/TestSimulatedFSDataset.java | 20 +++- .../hdfs/server/namenode/TestFileLimit.java | 2 +- 24 files changed, 157 insertions(+), 153 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 7acd64ae118..e4254cef608 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -25,6 +25,9 @@ Release 0.23.2 - UNRELEASED HDFS-2725. hdfs script usage information is missing the information about "dfs" command (Prashant Sharma via stevel) + HDFS-2907. Add a conf property dfs.datanode.fsdataset.factory to make + FSDataset in Datanode pluggable. 
(szetszwo) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index aef13d4ac34..3245aeac4c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -167,7 +167,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_CLIENT_RETRY_WINDOW_BASE= "dfs.client.retry.window.base"; public static final String DFS_METRICS_SESSION_ID_KEY = "dfs.metrics.session-id"; public static final String DFS_DATANODE_HOST_NAME_KEY = "dfs.datanode.hostname"; - public static final String DFS_DATANODE_STORAGEID_KEY = "dfs.datanode.StorageId"; public static final String DFS_NAMENODE_HOSTS_KEY = "dfs.namenode.hosts"; public static final String DFS_NAMENODE_HOSTS_EXCLUDE_KEY = "dfs.namenode.hosts.exclude"; public static final String DFS_CLIENT_SOCKET_TIMEOUT_KEY = "dfs.client.socket-timeout"; @@ -211,10 +210,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final int DFS_DATANODE_NUMBLOCKS_DEFAULT = 64; public static final String DFS_DATANODE_SCAN_PERIOD_HOURS_KEY = "dfs.datanode.scan.period.hours"; public static final int DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 0; - public static final String DFS_DATANODE_SIMULATEDDATASTORAGE_KEY = "dfs.datanode.simulateddatastorage"; - public static final boolean DFS_DATANODE_SIMULATEDDATASTORAGE_DEFAULT = false; - public static final String DFS_DATANODE_SIMULATEDDATASTORAGE_CAPACITY_KEY = "dfs.datanode.simulateddatastorage.capacity"; - public static final long DFS_DATANODE_SIMULATEDDATASTORAGE_CAPACITY_DEFAULT = 2L<<40; public static final String DFS_DATANODE_TRANSFERTO_ALLOWED_KEY = "dfs.datanode.transferTo.allowed"; public static final boolean DFS_DATANODE_TRANSFERTO_ALLOWED_DEFAULT = true; public static final String DFS_DATANODE_BLOCKVOLUMECHOICEPOLICY = "dfs.datanode.block.volume.choice.policy"; @@ -282,6 +277,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys { //Keys with no defaults public static final String DFS_DATANODE_PLUGINS_KEY = "dfs.datanode.plugins"; + public static final String DFS_DATANODE_FSDATASET_FACTORY_KEY = "dfs.datanode.fsdataset.factory"; public static final String DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY = "dfs.datanode.socket.write.timeout"; public static final String DFS_DATANODE_STARTUP_KEY = "dfs.datanode.startup"; public static final String DFS_NAMENODE_PLUGINS_KEY = "dfs.namenode.plugins"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java index 44533f13a12..75ce9118a9a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java @@ -88,7 +88,6 @@ private static void addDeprecatedKeys() { deprecate("fs.checkpoint.period", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY); deprecate("dfs.upgrade.permission", DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_KEY); deprecate("heartbeat.recheck.interval", DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY); - deprecate("StorageId", DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY); deprecate("dfs.https.client.keystore.resource", 
DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY); deprecate("dfs.https.need.client.auth", DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY); deprecate("slave.host.name", DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 8f9cc25de11..888a0363ed1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -43,10 +43,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PLUGINS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SIMULATEDDATASTORAGE_DEFAULT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SIMULATEDDATASTORAGE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_FEDERATION_NAMESERVICES; @@ -152,13 +149,11 @@ import org.apache.hadoop.util.DiskChecker.DiskErrorException; import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; import org.apache.hadoop.util.GenericOptionsParser; -import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ServicePlugin; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.VersionInfo; import org.mortbay.util.ajax.JSON; -import com.google.common.base.Preconditions; /********************************************************** @@ -427,13 +422,14 @@ void refreshNamenodes(Configuration conf) } } - private synchronized void setClusterId(String cid) throws IOException { - if(clusterId != null && !clusterId.equals(cid)) { - throw new IOException ("cluster id doesn't match. old cid=" + clusterId - + " new cid="+ cid); + private synchronized void setClusterId(final String nsCid, final String bpid + ) throws IOException { + if(clusterId != null && !clusterId.equals(nsCid)) { + throw new IOException ("Cluster IDs not matched: dn cid=" + clusterId + + " but ns cid="+ nsCid + "; bpid=" + bpid); } // else - clusterId = cid; + clusterId = nsCid; } private static String getHostName(Configuration config) @@ -810,51 +806,22 @@ void shutdownBlockPool(BPOfferService bpos) { */ void initBlockPool(BPOfferService bpos) throws IOException { NamespaceInfo nsInfo = bpos.getNamespaceInfo(); - Preconditions.checkState(nsInfo != null, - "Block pool " + bpos + " should have retrieved " + - "its namespace info before calling initBlockPool."); + if (nsInfo == null) { + throw new IOException("NamespaceInfo not found: Block pool " + bpos + + " should have retrieved namespace info before initBlockPool."); + } - String blockPoolId = nsInfo.getBlockPoolID(); - // Register the new block pool with the BP manager. 
blockPoolManager.addBlockPool(bpos); - synchronized (this) { - // we do not allow namenode from different cluster to register - if(clusterId != null && !clusterId.equals(nsInfo.clusterID)) { - throw new IOException( - "cannot register with the namenode because clusterid do not match:" - + " nn=" + nsInfo.getBlockPoolID() + "; nn cid=" + nsInfo.clusterID + - ";dn cid=" + clusterId); - } - - setClusterId(nsInfo.clusterID); - } - - StartupOption startOpt = getStartupOption(conf); - assert startOpt != null : "Startup option must be set."; - - boolean simulatedFSDataset = conf.getBoolean( - DFS_DATANODE_SIMULATEDDATASTORAGE_KEY, - DFS_DATANODE_SIMULATEDDATASTORAGE_DEFAULT); - - if (!simulatedFSDataset) { - // read storage info, lock data dirs and transition fs state if necessary - storage.recoverTransitionRead(DataNode.this, blockPoolId, nsInfo, - dataDirs, startOpt); - StorageInfo bpStorage = storage.getBPStorage(blockPoolId); - LOG.info("setting up storage: nsid=" + - bpStorage.getNamespaceID() + ";bpid=" - + blockPoolId + ";lv=" + storage.getLayoutVersion() + - ";nsInfo=" + nsInfo); - } + setClusterId(nsInfo.clusterID, nsInfo.getBlockPoolID()); // In the case that this is the first block pool to connect, initialize // the dataset, block scanners, etc. - initFsDataSet(); + initStorage(nsInfo); initPeriodicScanners(conf); - data.addBlockPool(blockPoolId, conf); + data.addBlockPool(nsInfo.getBlockPoolID(), conf); } /** @@ -881,31 +848,28 @@ int getBpOsCount() { * Initializes the {@link #data}. The initialization is done only once, when * handshake with the the first namenode is completed. */ - private synchronized void initFsDataSet() throws IOException { - if (data != null) { // Already initialized - return; + private void initStorage(final NamespaceInfo nsInfo) throws IOException { + final FSDatasetInterface.Factory factory + = FSDatasetInterface.Factory.getFactory(conf); + + if (!factory.isSimulated()) { + final StartupOption startOpt = getStartupOption(conf); + if (startOpt == null) { + throw new IOException("Startup option not set."); + } + final String bpid = nsInfo.getBlockPoolID(); + //read storage info, lock data dirs and transition fs state if necessary + storage.recoverTransitionRead(this, bpid, nsInfo, dataDirs, startOpt); + final StorageInfo bpStorage = storage.getBPStorage(bpid); + LOG.info("Setting up storage: nsid=" + bpStorage.getNamespaceID() + + ";bpid=" + bpid + ";lv=" + storage.getLayoutVersion() + + ";nsInfo=" + nsInfo); } - // get version and id info from the name-node - boolean simulatedFSDataset = conf.getBoolean( - DFS_DATANODE_SIMULATEDDATASTORAGE_KEY, - DFS_DATANODE_SIMULATEDDATASTORAGE_DEFAULT); - - if (simulatedFSDataset) { - storage.createStorageID(getPort()); - // it would have been better to pass storage as a parameter to - // constructor below - need to augment ReflectionUtils used below. 
- conf.set(DFS_DATANODE_STORAGEID_KEY, getStorageId()); - try { - data = (FSDatasetInterface) ReflectionUtils.newInstance( - Class.forName( - "org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset"), - conf); - } catch (ClassNotFoundException e) { - throw new IOException(StringUtils.stringifyException(e)); + synchronized(this) { + if (data == null) { + data = factory.createFSDatasetInterface(this, storage, conf); } - } else { - data = new FSDataset(this, storage, conf); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java index 8f649bcef5f..5927217c1d0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java @@ -76,6 +76,16 @@ ***************************************************/ @InterfaceAudience.Private class FSDataset implements FSDatasetInterface { + /** + * A factory for creating FSDataset objects. + */ + static class Factory extends FSDatasetInterface.Factory { + @Override + public FSDatasetInterface createFSDatasetInterface(DataNode datanode, + DataStorage storage, Configuration conf) throws IOException { + return new FSDataset(datanode, storage, conf); + } + } /** * A node type that can be built into a tree reflecting the @@ -1057,8 +1067,8 @@ public MetaDataInputStream getMetaDataInputStream(ExtendedBlock b) /** * An FSDataset has a directory where it loads its data files. */ - FSDataset(DataNode datanode, DataStorage storage, Configuration conf) - throws IOException { + private FSDataset(DataNode datanode, DataStorage storage, Configuration conf + ) throws IOException { this.datanode = datanode; this.maxBlocksPerDir = conf.getInt(DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_KEY, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java index ff3974f627a..48618cd2611 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java @@ -29,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo; @@ -38,6 +39,7 @@ import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.DataChecksum; +import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.DiskChecker.DiskErrorException; /** @@ -49,6 +51,30 @@ */ @InterfaceAudience.Private public interface FSDatasetInterface extends FSDatasetMBean { + /** + * A factory for creating FSDatasetInterface objects. + */ + public abstract class Factory { + /** @return the configured factory. 
*/ + public static Factory getFactory(Configuration conf) { + final Class clazz = conf.getClass( + DFSConfigKeys.DFS_DATANODE_FSDATASET_FACTORY_KEY, + FSDataset.Factory.class, + Factory.class); + return ReflectionUtils.newInstance(clazz, conf); + } + + /** Create a FSDatasetInterface object. */ + public abstract FSDatasetInterface createFSDatasetInterface( + DataNode datanode, DataStorage storage, Configuration conf + ) throws IOException; + + /** Does the factory create simulated objects? */ + public boolean isSimulated() { + return false; + } + } + /** * This is an interface for the underlying volume. * @see org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java index f3350b988a7..25198e36925 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java @@ -25,6 +25,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; +import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.hdfs.server.namenode.CreateEditsLog; import org.apache.hadoop.net.DNS; @@ -122,10 +123,9 @@ public static void main(String[] args) { } dataNodeDirs = args[i]; } else if (args[i].equals("-simulated")) { - conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + SimulatedFSDataset.setFactory(conf); } else if (args[i].equals("-inject")) { - if (!conf.getBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, - false) ) { + if (!FSDatasetInterface.Factory.getFactory(conf).isSimulated()) { System.out.print("-inject is valid only for simulated"); printUsageExit(); } @@ -158,7 +158,7 @@ public static void main(String[] args) { System.exit(-1); } boolean simulated = - conf.getBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, false); + FSDatasetInterface.Factory.getFactory(conf).isSimulated(); System.out.println("Starting " + numDataNodes + (simulated ? 
" Simulated " : " ") + " Data Nodes that will connect to Name Node at " + nameNodeAdr); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index d954690b55e..66a4f94d97c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -875,7 +875,7 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes, conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs); } if (simulatedCapacities != null) { - dnConf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + SimulatedFSDataset.setFactory(dnConf); dnConf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY, simulatedCapacities[i-curDatanodesNum]); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java index d8b58f7cb6c..20f28376a8e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java @@ -107,7 +107,7 @@ private void checkFile(FileSystem fileSys, Path name, int repl) public void testCopyOnWrite() throws IOException { Configuration conf = new HdfsConfiguration(); if (simulatedStorage) { - conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + SimulatedFSDataset.setFactory(conf); } MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); FileSystem fs = cluster.getFileSystem(); @@ -178,7 +178,7 @@ public void testCopyOnWrite() throws IOException { public void testSimpleFlush() throws IOException { Configuration conf = new HdfsConfiguration(); if (simulatedStorage) { - conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + SimulatedFSDataset.setFactory(conf); } fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); @@ -234,7 +234,7 @@ public void testSimpleFlush() throws IOException { public void testComplexFlush() throws IOException { Configuration conf = new HdfsConfiguration(); if (simulatedStorage) { - conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + SimulatedFSDataset.setFactory(conf); } fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); @@ -283,7 +283,7 @@ public void testComplexFlush() throws IOException { public void testFileNotFound() throws IOException { Configuration conf = new HdfsConfiguration(); if (simulatedStorage) { - conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + SimulatedFSDataset.setFactory(conf); } MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); FileSystem fs = cluster.getFileSystem(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java index c63c4ecc4ce..af27e00820d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java @@ -82,7 +82,7 @@ public class TestFileAppend2 extends TestCase { public 
void testSimpleAppend() throws IOException { final Configuration conf = new HdfsConfiguration(); if (simulatedStorage) { - conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + SimulatedFSDataset.setFactory(conf); } conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50); conf.setBoolean("dfs.support.append", true); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java index 1ba56d3844e..ab60c4c2210 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java @@ -77,7 +77,7 @@ public class TestFileAppend4 { public void setUp() throws Exception { this.conf = new Configuration(); if (simulatedStorage) { - conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + SimulatedFSDataset.setFactory(conf); } conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java index ed1508a2680..9fc7e78dbab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java @@ -144,7 +144,7 @@ public void testServerDefaults() throws IOException { public void testFileCreation() throws IOException { Configuration conf = new HdfsConfiguration(); if (simulatedStorage) { - conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + SimulatedFSDataset.setFactory(conf); } MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); FileSystem fs = cluster.getFileSystem(); @@ -223,7 +223,7 @@ public void testFileCreation() throws IOException { public void testDeleteOnExit() throws IOException { Configuration conf = new HdfsConfiguration(); if (simulatedStorage) { - conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + SimulatedFSDataset.setFactory(conf); } MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); FileSystem fs = cluster.getFileSystem(); @@ -287,7 +287,7 @@ public void testFileCreationError1() throws IOException { conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000); conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1); if (simulatedStorage) { - conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + SimulatedFSDataset.setFactory(conf); } // create cluster MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); @@ -361,7 +361,7 @@ public void testFileCreationError2() throws IOException { conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000); conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1); if (simulatedStorage) { - conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + SimulatedFSDataset.setFactory(conf); } // create cluster MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); @@ -460,7 +460,7 @@ public void xxxtestFileCreationNamenodeRestart() throws IOException { conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000); conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1); if (simulatedStorage) { - conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + SimulatedFSDataset.setFactory(conf); } // create cluster @@ -599,7 +599,7 @@ public void 
testDFSClientDeath() throws IOException, InterruptedException { Configuration conf = new HdfsConfiguration(); System.out.println("Testing adbornal client death."); if (simulatedStorage) { - conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + SimulatedFSDataset.setFactory(conf); } MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); FileSystem fs = cluster.getFileSystem(); @@ -634,7 +634,7 @@ public void testDFSClientDeath() throws IOException, InterruptedException { public void testFileCreationNonRecursive() throws IOException { Configuration conf = new HdfsConfiguration(); if (simulatedStorage) { - conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + SimulatedFSDataset.setFactory(conf); } MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); FileSystem fs = cluster.getFileSystem(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java index 9491cd13a77..f2de8d805fd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java @@ -136,7 +136,7 @@ public void testInjection() throws IOException { Configuration conf = new HdfsConfiguration(); conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, Integer.toString(numDataNodes)); conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, checksumSize); - conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + SimulatedFSDataset.setFactory(conf); //first time format cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build(); cluster.waitActive(); @@ -159,7 +159,7 @@ public void testInjection() throws IOException { LOG.info("Restarting minicluster"); conf = new HdfsConfiguration(); - conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + SimulatedFSDataset.setFactory(conf); conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f"); cluster = new MiniDFSCluster.Builder(conf) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java index 6f2af4043c1..3a6d46cd7a2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java @@ -174,7 +174,7 @@ public void runTest(final long blockSize) throws IOException { Configuration conf = new Configuration(); if (simulatedStorage) { - conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + SimulatedFSDataset.setFactory(conf); } MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); FileSystem fs = cluster.getFileSystem(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java index e9de0ad0193..d0ab5afb6cc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java @@ -206,7 +206,7 @@ private void dfsPreadTest(boolean disableTransferTo) throws IOException { conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096); 
conf.setLong(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, 4096); if (simulatedStorage) { - conf.setBoolean("dfs.datanode.simulateddatastorage", true); + SimulatedFSDataset.setFactory(conf); } if (disableTransferTo) { conf.setBoolean("dfs.datanode.transferTo.allowed", false); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java index eef83e4174d..066c8c79a53 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java @@ -199,7 +199,7 @@ public void runReplication(boolean simulated) throws IOException { Configuration conf = new HdfsConfiguration(); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false); if (simulated) { - conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + SimulatedFSDataset.setFactory(conf); } MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(numDatanodes) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java index 0d3a0399e91..29c1aa221e0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java @@ -28,7 +28,7 @@ public class TestSetrepIncreasing extends TestCase { static void setrep(int fromREP, int toREP, boolean simulatedStorage) throws IOException { Configuration conf = new HdfsConfiguration(); if (simulatedStorage) { - conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + SimulatedFSDataset.setFactory(conf); } conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, "" + fromREP); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java index 34ed50a9cdd..eb2a1d80ac3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java @@ -124,7 +124,7 @@ public void doTestShortCircuitRead(boolean ignoreChecksum, int size, conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY, UserGroupInformation.getCurrentUser().getShortUserName()); if (simulatedStorage) { - conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + SimulatedFSDataset.setFactory(conf); } MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1) .format(true).build(); @@ -248,7 +248,7 @@ public void testSkipWithVerifyChecksum() throws IOException { conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY, UserGroupInformation.getCurrentUser().getShortUserName()); if (simulatedStorage) { - conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + SimulatedFSDataset.setFactory(conf); } MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1) .format(true).build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java index 8fb2b7a38cf..efcb74ad1e5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java @@ -92,7 +92,7 @@ private void cleanupFile(FileSystem fileSys, Path name) throws IOException { public void testSmallBlock() throws IOException { Configuration conf = new HdfsConfiguration(); if (simulatedStorage) { - conf.setBoolean("dfs.datanode.simulateddatastorage", true); + SimulatedFSDataset.setFactory(conf); } conf.set(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, "1"); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index 34cd784bd04..eb567469ab9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -77,7 +77,7 @@ static void initConf(Configuration conf) { conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE); conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE); conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L); - conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + SimulatedFSDataset.setFactory(conf); conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java index c9cecd11b9c..3e7c5199636 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java @@ -22,7 +22,6 @@ import java.io.InputStream; import java.io.OutputStream; import java.util.ArrayList; -import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -32,7 +31,6 @@ import javax.management.ObjectName; import javax.management.StandardMBean; -import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.Block; @@ -63,21 +61,33 @@ * * Note the synchronization is coarse grained - it is at each method. 
*/ +public class SimulatedFSDataset implements FSDatasetInterface { + static class Factory extends FSDatasetInterface.Factory { + @Override + public FSDatasetInterface createFSDatasetInterface(DataNode datanode, + DataStorage storage, Configuration conf) throws IOException { + return new SimulatedFSDataset(datanode, storage, conf); + } -public class SimulatedFSDataset implements FSDatasetInterface, Configurable{ + @Override + public boolean isSimulated() { + return true; + } + } + + public static void setFactory(Configuration conf) { + conf.set(DFSConfigKeys.DFS_DATANODE_FSDATASET_FACTORY_KEY, + Factory.class.getName()); + } - public static final String CONFIG_PROPERTY_SIMULATED = - "dfs.datanode.simulateddatastorage"; public static final String CONFIG_PROPERTY_CAPACITY = - "dfs.datanode.simulateddatastorage.capacity"; + "dfs.datanode.simulateddatastorage.capacity"; public static final long DEFAULT_CAPACITY = 2L<<40; // 1 terabyte - public static final byte DEFAULT_DATABYTE = 9; // 1 terabyte - byte simulatedDataByte = DEFAULT_DATABYTE; - Configuration conf = null; + public static final byte DEFAULT_DATABYTE = 9; - static byte[] nullCrcFileData; - { + static final byte[] nullCrcFileData; + static { DataChecksum checksum = DataChecksum.newDataChecksum( DataChecksum. CHECKSUM_NULL, 16*1024 ); byte[] nullCrcHeader = checksum.getHeader(); @@ -360,31 +370,22 @@ private SimulatedBPStorage getBPStorage(String bpid) throws IOException { } } - private Map> blockMap = null; - private SimulatedStorage storage = null; - private String storageId; + private final Map> blockMap + = new HashMap>(); + private final SimulatedStorage storage; + private final String storageId; - public SimulatedFSDataset(Configuration conf) throws IOException { - setConf(conf); - } - - // Constructor used for constructing the object using reflection - @SuppressWarnings("unused") - private SimulatedFSDataset() { // real construction when setConf called.. 
- } - - public Configuration getConf() { - return conf; - } - - public void setConf(Configuration iconf) { - conf = iconf; - storageId = conf.get(DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY, "unknownStorageId" + - new Random().nextInt()); + public SimulatedFSDataset(DataNode datanode, DataStorage storage, + Configuration conf) { + if (storage != null) { + storage.createStorageID(datanode.getPort()); + this.storageId = storage.getStorageID(); + } else { + this.storageId = "unknownStorageId" + new Random().nextInt(); + } registerMBean(storageId); - storage = new SimulatedStorage( + this.storage = new SimulatedStorage( conf.getLong(CONFIG_PROPERTY_CAPACITY, DEFAULT_CAPACITY)); - blockMap = new HashMap>(); } public synchronized void injectBlocks(String bpid, @@ -441,23 +442,16 @@ public synchronized void unfinalizeBlock(ExtendedBlock b) throws IOException { @Override public synchronized BlockListAsLongs getBlockReport(String bpid) { + final List blocks = new ArrayList(); final Map map = blockMap.get(bpid); - Block[] blockTable = new Block[map.size()]; if (map != null) { - int count = 0; for (BInfo b : map.values()) { if (b.isFinalized()) { - blockTable[count++] = b.theBlock; + blocks.add(b.theBlock); } } - if (count != blockTable.length) { - blockTable = Arrays.copyOf(blockTable, count); - } - } else { - blockTable = new Block[0]; } - return new BlockListAsLongs( - new ArrayList(Arrays.asList(blockTable)), null); + return new BlockListAsLongs(blocks, null); } @Override // FSDatasetMBean diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java index f35e93cb157..bfff3ff19e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java @@ -34,7 +34,7 @@ public class TestDataNodeMetrics extends TestCase { public void testDataNodeMetrics() throws Exception { Configuration conf = new HdfsConfiguration(); - conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + SimulatedFSDataset.setFactory(conf); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); try { FileSystem fs = cluster.getFileSystem(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java index 214b4e71a66..6a6c81a6fa2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java @@ -44,8 +44,8 @@ public class TestSimulatedFSDataset extends TestCase { protected void setUp() throws Exception { super.setUp(); - conf = new HdfsConfiguration(); - conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + conf = new HdfsConfiguration(); + SimulatedFSDataset.setFactory(conf); } protected void tearDown() throws Exception { @@ -86,6 +86,18 @@ int addSomeBlocks(FSDatasetInterface fsdataset, int startingBlockId) int addSomeBlocks(FSDatasetInterface fsdataset ) throws IOException { return addSomeBlocks(fsdataset, 1); } + + public void testFSDatasetFactory() { + final Configuration conf = new 
Configuration(); + FSDatasetInterface.Factory f = FSDatasetInterface.Factory.getFactory(conf); + assertEquals(FSDataset.Factory.class, f.getClass()); + assertFalse(f.isSimulated()); + + SimulatedFSDataset.setFactory(conf); + FSDatasetInterface.Factory s = FSDatasetInterface.Factory.getFactory(conf); + assertEquals(SimulatedFSDataset.Factory.class, s.getClass()); + assertTrue(s.isSimulated()); + } public void testGetMetaData() throws IOException { FSDatasetInterface fsdataset = getSimulatedFSDataset(); @@ -287,8 +299,8 @@ public void testInvalidate() throws IOException { } } - private SimulatedFSDataset getSimulatedFSDataset() throws IOException { - SimulatedFSDataset fsdataset = new SimulatedFSDataset(conf); + private SimulatedFSDataset getSimulatedFSDataset() { + SimulatedFSDataset fsdataset = new SimulatedFSDataset(null, null, conf); fsdataset.addBlockPool(bpid, conf); return fsdataset; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java index 48ab6ce18e1..0bdebec9f45 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java @@ -83,7 +83,7 @@ public void testFileLimit() throws IOException { int currentNodes = 0; if (simulatedStorage) { - conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + SimulatedFSDataset.setFactory(conf); } MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); FileSystem fs = cluster.getFileSystem(); From c66a26dd8b015ccef6458c0e938145205e859661 Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Wed, 22 Feb 2012 20:01:59 +0000 Subject: [PATCH 3/7] svn merge -c 1292483 from trunk to branch-0.23 FIXES HADOOP-7660 Maven generated .classpath doesnot includes "target/generated-test-source/java" as source directory. (Laxman via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1292485 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 4 ++++ hadoop-common-project/hadoop-common/pom.xml | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 5a5d4663ce0..d321c8df4be 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -23,6 +23,10 @@ Release 0.23.2 - UNRELEASED BUG FIXES + HADOOP-7660. Maven generated .classpath doesnot includes + "target/generated-test-source/java" as source directory. + (Laxman via bobby) + HADOOP-8042 When copying a file out of HDFS, modifying it, and uploading it back into HDFS, the put fails due to a CRC mismatch (Daryn Sharp via bobby) diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 01c44339933..88e5953e7b0 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -454,7 +454,7 @@ add-test-source - generate-test-sources + generate-sources add-test-source From 827e5e89f7ef3f894eeff14359def0b8e543957b Mon Sep 17 00:00:00 2001 From: Todd Lipcon Date: Thu, 23 Feb 2012 01:23:17 +0000 Subject: [PATCH 4/7] HDFS-2985. Improve logging when replicas are marked as corrupt. Contributed by Todd Lipcon. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1292610 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../server/blockmanagement/BlockManager.java | 99 +++++++++++++------ .../blockmanagement/CorruptReplicasMap.java | 18 +++- .../server/namenode/NameNodeRpcServer.java | 3 +- .../hadoop/hdfs/TestFileCorruption.java | 2 +- .../TestCorruptReplicaInfo.java | 8 +- .../namenode/metrics/TestNameNodeMetrics.java | 6 +- 7 files changed, 99 insertions(+), 39 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index e4254cef608..27cfe030d75 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -28,6 +28,8 @@ Release 0.23.2 - UNRELEASED HDFS-2907. Add a conf property dfs.datanode.fsdataset.factory to make FSDataset in Datanode pluggable. (szetszwo) + HDFS-2985. Improve logging when replicas are marked as corrupt. (todd) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 19968f75fd4..44411fa7301 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -805,9 +805,11 @@ private void addToInvalidates(Block b) { * Mark the block belonging to datanode as corrupt * @param blk Block to be marked as corrupt * @param dn Datanode which holds the corrupt replica + * @param reason a textual reason why the block should be marked corrupt, + * for logging purposes */ public void findAndMarkBlockAsCorrupt(final ExtendedBlock blk, - final DatanodeInfo dn) throws IOException { + final DatanodeInfo dn, String reason) throws IOException { namesystem.writeLock(); try { final BlockInfo storedBlock = getStoredBlock(blk.getLocalBlock()); @@ -820,14 +822,15 @@ public void findAndMarkBlockAsCorrupt(final ExtendedBlock blk, + blk + " not found."); return; } - markBlockAsCorrupt(storedBlock, dn); + markBlockAsCorrupt(storedBlock, dn, reason); } finally { namesystem.writeUnlock(); } } private void markBlockAsCorrupt(BlockInfo storedBlock, - DatanodeInfo dn) throws IOException { + DatanodeInfo dn, + String reason) throws IOException { assert storedBlock != null : "storedBlock should not be null"; DatanodeDescriptor node = getDatanodeManager().getDatanode(dn); if (node == null) { @@ -851,7 +854,7 @@ private void markBlockAsCorrupt(BlockInfo storedBlock, node.addBlock(storedBlock); // Add this replica to corruptReplicas Map - corruptReplicas.addToCorruptReplicasMap(storedBlock, node); + corruptReplicas.addToCorruptReplicasMap(storedBlock, node, reason); if (countNodes(storedBlock).liveReplicas() >= inode.getReplication()) { // the block is over-replicated so invalidate the replicas immediately invalidateBlock(storedBlock, node); @@ -1313,6 +1316,21 @@ private static class StatefulBlockInfo { this.reportedState = reportedState; } } + + /** + * BlockToMarkCorrupt is used to build the "toCorrupt" list, which is a + * list of blocks that should be considered corrupt due to a block report. 
+ */ + private static class BlockToMarkCorrupt { + final BlockInfo blockInfo; + final String reason; + + BlockToMarkCorrupt(BlockInfo blockInfo, String reason) { + super(); + this.blockInfo = blockInfo; + this.reason = reason; + } + } /** * The given datanode is reporting all its blocks. @@ -1367,7 +1385,7 @@ private void processReport(final DatanodeDescriptor node, Collection toAdd = new LinkedList(); Collection toRemove = new LinkedList(); Collection toInvalidate = new LinkedList(); - Collection toCorrupt = new LinkedList(); + Collection toCorrupt = new LinkedList(); Collection toUC = new LinkedList(); reportDiff(node, report, toAdd, toRemove, toInvalidate, toCorrupt, toUC); @@ -1387,8 +1405,8 @@ private void processReport(final DatanodeDescriptor node, + " does not belong to any file."); addToInvalidates(b, node); } - for (BlockInfo b : toCorrupt) { - markBlockAsCorrupt(b, node); + for (BlockToMarkCorrupt b : toCorrupt) { + markBlockAsCorrupt(b.blockInfo, node, b.reason); } } @@ -1419,8 +1437,10 @@ private void processFirstBlockReport(final DatanodeDescriptor node, // If block is corrupt, mark it and continue to next block. BlockUCState ucState = storedBlock.getBlockUCState(); - if (isReplicaCorrupt(iblk, reportedState, storedBlock, ucState, node)) { - markBlockAsCorrupt(storedBlock, node); + BlockToMarkCorrupt c = checkReplicaCorrupt( + iblk, reportedState, storedBlock, ucState, node); + if (c != null) { + markBlockAsCorrupt(c.blockInfo, node, c.reason); continue; } @@ -1442,7 +1462,7 @@ private void reportDiff(DatanodeDescriptor dn, Collection toAdd, // add to DatanodeDescriptor Collection toRemove, // remove from DatanodeDescriptor Collection toInvalidate, // should be removed from DN - Collection toCorrupt, // add to corrupt replicas list + Collection toCorrupt, // add to corrupt replicas list Collection toUC) { // add to under-construction list // place a delimiter in the list which separates blocks // that have been reported from those that have not @@ -1505,7 +1525,7 @@ private BlockInfo processReportedBlock(final DatanodeDescriptor dn, final Block block, final ReplicaState reportedState, final Collection toAdd, final Collection toInvalidate, - final Collection toCorrupt, + final Collection toCorrupt, final Collection toUC) { if(LOG.isDebugEnabled()) { @@ -1536,8 +1556,10 @@ private BlockInfo processReportedBlock(final DatanodeDescriptor dn, return storedBlock; } - if (isReplicaCorrupt(block, reportedState, storedBlock, ucState, dn)) { - toCorrupt.add(storedBlock); + BlockToMarkCorrupt c = checkReplicaCorrupt( + block, reportedState, storedBlock, ucState, dn); + if (c != null) { + toCorrupt.add(c); return storedBlock; } @@ -1561,8 +1583,11 @@ private BlockInfo processReportedBlock(final DatanodeDescriptor dn, * as switch statements, on the theory that it is easier to understand * the combinatorics of reportedState and ucState that way. It should be * at least as efficient as boolean expressions. 
+ * + * @return a BlockToMarkCorrupt object, or null if the replica is not corrupt */ - private boolean isReplicaCorrupt(Block iblk, ReplicaState reportedState, + private BlockToMarkCorrupt checkReplicaCorrupt( + Block iblk, ReplicaState reportedState, BlockInfo storedBlock, BlockUCState ucState, DatanodeDescriptor dn) { switch(reportedState) { @@ -1570,17 +1595,31 @@ private boolean isReplicaCorrupt(Block iblk, ReplicaState reportedState, switch(ucState) { case COMPLETE: case COMMITTED: - return (storedBlock.getGenerationStamp() != iblk.getGenerationStamp() - || storedBlock.getNumBytes() != iblk.getNumBytes()); + if (storedBlock.getGenerationStamp() != iblk.getGenerationStamp()) { + return new BlockToMarkCorrupt(storedBlock, + "block is " + ucState + " and reported genstamp " + + iblk.getGenerationStamp() + " does not match " + + "genstamp in block map " + storedBlock.getGenerationStamp()); + } else if (storedBlock.getNumBytes() != iblk.getNumBytes()) { + return new BlockToMarkCorrupt(storedBlock, + "block is " + ucState + " and reported length " + + iblk.getNumBytes() + " does not match " + + "length in block map " + storedBlock.getNumBytes()); + } else { + return null; // not corrupt + } default: - return false; + return null; } case RBW: case RWR: if (!storedBlock.isComplete()) { - return false; + return null; // not corrupt } else if (storedBlock.getGenerationStamp() != iblk.getGenerationStamp()) { - return true; + return new BlockToMarkCorrupt(storedBlock, + "reported " + reportedState + " replica with genstamp " + + iblk.getGenerationStamp() + " does not match COMPLETE block's " + + "genstamp in block map " + storedBlock.getGenerationStamp()); } else { // COMPLETE block, same genstamp if (reportedState == ReplicaState.RBW) { // If it's a RBW report for a COMPLETE block, it may just be that @@ -1590,18 +1629,22 @@ private boolean isReplicaCorrupt(Block iblk, ReplicaState reportedState, LOG.info("Received an RBW replica for block " + storedBlock + " on " + dn.getName() + ": ignoring it, since the block is " + "complete with the same generation stamp."); - return false; + return null; } else { - return true; + return new BlockToMarkCorrupt(storedBlock, + "reported replica has invalid state " + reportedState); } } case RUR: // should not be reported case TEMPORARY: // should not be reported default: - LOG.warn("Unexpected replica state " + reportedState - + " for block: " + storedBlock + - " on " + dn.getName() + " size " + storedBlock.getNumBytes()); - return true; + String msg = "Unexpected replica state " + reportedState + + " for block: " + storedBlock + + " on " + dn.getName() + " size " + storedBlock.getNumBytes(); + // log here at WARN level since this is really a broken HDFS + // invariant + LOG.warn(msg); + return new BlockToMarkCorrupt(storedBlock, msg); } } @@ -2132,7 +2175,7 @@ void addBlock(DatanodeDescriptor node, Block block, String delHint) // blockReceived reports a finalized block Collection toAdd = new LinkedList(); Collection toInvalidate = new LinkedList(); - Collection toCorrupt = new LinkedList(); + Collection toCorrupt = new LinkedList(); Collection toUC = new LinkedList(); processReportedBlock(node, block, ReplicaState.FINALIZED, toAdd, toInvalidate, toCorrupt, toUC); @@ -2153,8 +2196,8 @@ void addBlock(DatanodeDescriptor node, Block block, String delHint) + " does not belong to any file."); addToInvalidates(b, node); } - for (BlockInfo b : toCorrupt) { - markBlockAsCorrupt(b, node); + for (BlockToMarkCorrupt b : toCorrupt) { + markBlockAsCorrupt(b.blockInfo, 
node, b.reason); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java index ebef0022172..083d39ef33e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java @@ -44,25 +44,37 @@ public class CorruptReplicasMap{ * * @param blk Block to be added to CorruptReplicasMap * @param dn DatanodeDescriptor which holds the corrupt replica + * @param reason a textual reason (for logging purposes) */ - public void addToCorruptReplicasMap(Block blk, DatanodeDescriptor dn) { + public void addToCorruptReplicasMap(Block blk, DatanodeDescriptor dn, + String reason) { Collection nodes = getNodes(blk); if (nodes == null) { nodes = new TreeSet(); corruptReplicasMap.put(blk, nodes); } + + String reasonText; + if (reason != null) { + reasonText = " because " + reason; + } else { + reasonText = ""; + } + if (!nodes.contains(dn)) { nodes.add(dn); NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+ blk.getBlockName() + " added as corrupt on " + dn.getName() + - " by " + Server.getRemoteIp()); + " by " + Server.getRemoteIp() + + reasonText); } else { NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+ "duplicate requested for " + blk.getBlockName() + " to add as corrupt " + "on " + dn.getName() + - " by " + Server.getRemoteIp()); + " by " + Server.getRemoteIp() + + reasonText); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 4a67fec5100..087079ed579 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -459,7 +459,8 @@ public void reportBadBlocks(LocatedBlock[] blocks) throws IOException { DatanodeInfo[] nodes = blocks[i].getLocations(); for (int j = 0; j < nodes.length; j++) { DatanodeInfo dn = nodes[j]; - namesystem.getBlockManager().findAndMarkBlockAsCorrupt(blk, dn); + namesystem.getBlockManager().findAndMarkBlockAsCorrupt(blk, dn, + "client machine reported it"); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java index d5ba1992a8f..af9d05c061b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java @@ -147,7 +147,7 @@ public void testArrayOutOfBoundsException() throws Exception { DatanodeRegistration dnR = DataNodeTestUtils.getDNRegistrationForBP(dataNode, blk.getBlockPoolId()); cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt( - blk, new DatanodeInfo(dnR)); + blk, new DatanodeInfo(dnR), "TEST"); // open the file fs.open(FILE_PATH); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
index ab63d7e085e..3b7eccdca6a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
@@ -83,14 +83,14 @@ public void testCorruptReplicaInfo() throws IOException,
       DatanodeDescriptor dn1 = new DatanodeDescriptor();
       DatanodeDescriptor dn2 = new DatanodeDescriptor();
 
-      crm.addToCorruptReplicasMap(getBlock(0), dn1);
+      crm.addToCorruptReplicasMap(getBlock(0), dn1, "TEST");
       assertEquals("Number of corrupt blocks not returning correctly",
                    1, crm.size());
-      crm.addToCorruptReplicasMap(getBlock(1), dn1);
+      crm.addToCorruptReplicasMap(getBlock(1), dn1, "TEST");
       assertEquals("Number of corrupt blocks not returning correctly",
                    2, crm.size());
 
-      crm.addToCorruptReplicasMap(getBlock(1), dn2);
+      crm.addToCorruptReplicasMap(getBlock(1), dn2, "TEST");
       assertEquals("Number of corrupt blocks not returning correctly",
                    2, crm.size());
 
@@ -103,7 +103,7 @@ public void testCorruptReplicaInfo() throws IOException,
                    0, crm.size());
 
       for (Long block_id: block_ids) {
-        crm.addToCorruptReplicasMap(getBlock(block_id), dn1);
+        crm.addToCorruptReplicasMap(getBlock(block_id), dn1, "TEST");
       }
 
       assertEquals("Number of corrupt blocks not returning correctly",
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index 1a9db4a63e2..1f252ca5e09 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -174,7 +174,8 @@ public void testCorruptBlock() throws Exception {
     // Corrupt first replica of the block
     LocatedBlock block = NameNodeAdapter.getBlockLocations(
         cluster.getNameNode(), file.toString(), 0, 1).get(0);
-    bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0]);
+    bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
+        "TEST");
     updateMetrics();
     MetricsRecordBuilder rb = getMetrics(NS_METRICS);
     assertGauge("CorruptBlocks", 1L, rb);
@@ -213,7 +214,8 @@ public void testMissingBlock() throws Exception {
     // Corrupt the only replica of the block to result in a missing block
     LocatedBlock block = NameNodeAdapter.getBlockLocations(
         cluster.getNameNode(), file.toString(), 0, 1).get(0);
-    bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0]);
+    bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
+        "TEST");
     updateMetrics();
     MetricsRecordBuilder rb = getMetrics(NS_METRICS);
     assertGauge("UnderReplicatedBlocks", 1L, rb);
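This patch threads a textual reason through every call that marks a replica corrupt: production callers pass a real explanation ("client machine reported it"), while the tests simply pass "TEST". The TestCorruptReplicaInfo assertions above also make the counting rule explicit: crm.size() counts distinct corrupt blocks, not corrupt replicas. A self-contained illustration of that rule, mirroring the map-of-sets structure visible in the CorruptReplicasMap hunk earlier; it uses only java.util, none of the real HDFS classes.

```java
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;

public class CorruptCountDemo {
  public static void main(String[] args) {
    Map<Long, Set<String>> corrupt = new HashMap<Long, Set<String>>();

    addCorrupt(corrupt, 0L, "dn1", "TEST");
    addCorrupt(corrupt, 1L, "dn1", "TEST");
    addCorrupt(corrupt, 1L, "dn2", "TEST"); // same block, different datanode

    // Two distinct corrupt blocks, even though three corrupt replicas were reported.
    System.out.println("corrupt blocks: " + corrupt.size()); // prints 2
  }

  // Keyed by block; each block maps to the set of datanodes holding a corrupt replica.
  static void addCorrupt(Map<Long, Set<String>> map, long blockId,
      String datanode, String reason) {
    Set<String> nodes = map.get(blockId);
    if (nodes == null) {
      nodes = new TreeSet<String>();
      map.put(blockId, nodes);
    }
    if (nodes.add(datanode)) {
      System.out.println("block " + blockId + " added as corrupt on "
          + datanode + " because " + reason);
    }
  }
}
```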
From 88c38a84dc05f6298d646f08df22f98435d63699 Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Thu, 23 Feb 2012 02:20:10 +0000
Subject: [PATCH 5/7] svn merge -c 1243690 from trunk for HDFS-2944.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1292622 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                    | 4 ++++
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml            | 4 ++--
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 27cfe030d75..fdb0b369afb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -57,6 +57,10 @@ Release 0.23.2 - UNRELEASED
 
     HDFS-2969. ExtendedBlock.equals is incorrectly implemented (todd)
 
+    HDFS-2944. Typo in hdfs-default.xml causes
+    dfs.client.block.write.replace-datanode-on-failure.enable to be mistakenly
+    disabled. (atm)
+
 Release 0.23.1 - 2012-02-17
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 384674fe930..d70bc1ee030 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -347,7 +347,7 @@ creations/deletions), or "all".
 
 <property>
   <name>dfs.client.block.write.replace-datanode-on-failure.enable</name>
-  <value>ture</value>
+  <value>false</value>
   <description>
     If there is a datanode/network failure in the write pipeline,
     DFSClient will try to remove the failed datanode from the pipeline
@@ -355,7 +355,7 @@ creations/deletions), or "all".
     the number of datanodes in the pipeline is decreased.  The feature is
     to add new datanodes to the pipeline.
 
-    This is a site-wise property to enable/disable the feature.
+    This is a site-wide property to enable/disable the feature.
 
     See also dfs.client.block.write.replace-datanode-on-failure.policy
   </description>
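HDFS-2944 above removes the unparseable value "ture", and HDFS-2981 in the next patch then sets the shipped default to true. A misspelled boolean is easy to miss because standard Java boolean parsing quietly maps anything that is not "true" (case-insensitive) to false; the snippet below shows that generic behaviour, and is not necessarily the exact code path HDFS used to read this key.

```java
public class BooleanTypoDemo {
  public static void main(String[] args) {
    System.out.println(Boolean.parseBoolean("true"));  // true
    System.out.println(Boolean.parseBoolean("TRUE"));  // true (case-insensitive)
    System.out.println(Boolean.parseBoolean("ture"));  // false: the typo silently disables the feature
  }
}
```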
From 6084873b82102b565aefc3ee69616d91df492e54 Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Thu, 23 Feb 2012 02:27:49 +0000
Subject: [PATCH 6/7] svn merge -c 1292626 from trunk for HDFS-2981.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1292627 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                    | 4 ++++
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml            | 2 +-
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fdb0b369afb..38fd1d813f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -61,6 +61,10 @@ Release 0.23.2 - UNRELEASED
     dfs.client.block.write.replace-datanode-on-failure.enable to be mistakenly
     disabled. (atm)
 
+    HDFS-2981. In hdfs-default.xml, the default value of
+    dfs.client.block.write.replace-datanode-on-failure.enable should be true.
+    (szetszwo)
+
 Release 0.23.1 - 2012-02-17
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index d70bc1ee030..1ce090be947 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -347,7 +347,7 @@ creations/deletions), or "all".
 
 <property>
   <name>dfs.client.block.write.replace-datanode-on-failure.enable</name>
-  <value>false</value>
+  <value>true</value>
   <description>
     If there is a datanode/network failure in the write pipeline,
     DFSClient will try to remove the failed datanode from the pipeline

From 1714f8b6c7d6df099981d5b7c99a349c4ad644ba Mon Sep 17 00:00:00 2001
From: Thomas Graves
Date: Thu, 23 Feb 2012 15:57:47 +0000
Subject: [PATCH 7/7] merge -r 1292830:1292831 from trunk to branch-0.23. FIXES: MAPREDUCE-3878

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1292834 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-mapreduce-project/CHANGES.txt                           |  3 +++
 .../mapreduce/v2/app/webapp/AppController.java                 | 12 +++++++++---
 2 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 590998ac647..3c37e2189d0 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -42,6 +42,9 @@ Release 0.23.2 - UNRELEASED
     to the maven build. (Ravi Prakash via vinodkv)
 
     MAPREDUCE-3884. PWD should be first in the classpath of MR tasks (tucu)
+
+    MAPREDUCE-3878. Null user on filtered jobhistory job page (Jonathon Eagles
+    via tgraves)
 
 Release 0.23.1 - 2012-02-17
 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
index 9b4b620de52..f9583da5a97 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
@@ -343,9 +343,15 @@ void accessDenied(String s) {
    * @return True if the requesting user has permission to view the job
    */
   boolean checkAccess(Job job) {
-    UserGroupInformation callerUgi = UserGroupInformation.createRemoteUser(
-        request().getRemoteUser());
-    return job.checkAccess(callerUgi, JobACL.VIEW_JOB);
+    String remoteUser = request().getRemoteUser();
+    UserGroupInformation callerUGI = null;
+    if (remoteUser != null) {
+      callerUGI = UserGroupInformation.createRemoteUser(remoteUser);
+    }
+    if (callerUGI != null && !job.checkAccess(callerUGI, JobACL.VIEW_JOB)) {
+      return false;
+    }
+    return true;
   }
 
   /**
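The AppController change above guards against requests that carry no remote user: previously a null user name was handed straight to UserGroupInformation.createRemoteUser(), which rejects it; now an anonymous request falls through to "allow", and only an authenticated user who fails the VIEW_JOB ACL is denied. A condensed restatement of that logic follows, assuming the same Job, JobACL and UserGroupInformation types used in the hunk; the JobAclChecker and canView() names are illustrative, not part of the real AppController API.

```java
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.security.UserGroupInformation;

class JobAclChecker {
  // Deny only when an authenticated user fails the VIEW_JOB check;
  // requests with no remote user are allowed through.
  static boolean canView(Job job, String remoteUser) {
    if (remoteUser == null) {
      return true;
    }
    UserGroupInformation callerUGI =
        UserGroupInformation.createRemoteUser(remoteUser);
    return job.checkAccess(callerUGI, JobACL.VIEW_JOB);
  }
}
```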