From cbfd6eecbddc8833b11aada3658e589c1bf099d6 Mon Sep 17 00:00:00 2001
From: Enis Soztutar
Date: Wed, 24 Aug 2016 03:41:54 -0700
Subject: [PATCH] HBASE-16474 Remove dfs.support.append related code and documentation

---
 .../org/apache/hadoop/hbase/util/FSUtils.java | 35 +------------------
 .../hbase/TestFullLogReconstruction.java      |  1 -
 .../apache/hadoop/hbase/TestIOFencing.java    |  3 +-
 .../apache/hadoop/hbase/TestZooKeeper.java    |  1 -
 .../hadoop/hbase/client/TestAdmin2.java       |  2 --
 .../hbase/coprocessor/TestWALObserver.java    |  1 -
 .../hadoop/hbase/fs/TestBlockReorder.java     |  1 -
 .../hbase/master/TestMasterTransitions.java   |  1 -
 ...DeleteColumnFamilyProcedureFromClient.java |  1 -
 .../wal/AbstractTestProtobufLog.java          |  2 --
 .../wal/AbstractTestWALReplay.java            |  1 -
 .../regionserver/wal/TestLogRollAbort.java    |  9 -----
 .../regionserver/wal/TestLogRolling.java      | 10 ------
 .../replication/TestMasterReplication.java    |  1 -
 .../TestMultiSlaveReplication.java            |  1 -
 .../TestPerTableCFReplication.java            |  1 -
 .../replication/TestReplicationBase.java      |  4 +--
 .../replication/TestReplicationWithTags.java  |  2 --
 .../regionserver/TestReplicationSink.java     |  1 -
 ...bilityLabelReplicationWithExpAsString.java |  2 --
 .../TestVisibilityLabelsReplication.java      |  2 --
 .../apache/hadoop/hbase/util/TestFSUtils.java |  5 +--
 .../hadoop/hbase/wal/TestWALFactory.java      |  2 --
 .../apache/hadoop/hbase/wal/TestWALSplit.java |  4 +--
 .../hbase/zookeeper/TestZooKeeperACL.java     |  1 -
 .../asciidoc/_chapters/configuration.adoc     | 21 +----------
 26 files changed, 6 insertions(+), 109 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index 3f77610073d..15f079da6e4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -83,7 +83,6 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSHedgedReadMetrics;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Progressable;
@@ -1267,6 +1266,7 @@ public abstract class FSUtils {
       super(fs, HConstants.HBASE_NON_TABLE_DIRS);
     }
 
+    @Override
     protected boolean isValidName(final String name) {
       if (!super.isValidName(name))
         return false;
@@ -1281,39 +1281,6 @@ public abstract class FSUtils {
     }
   }
 
-  /**
-   * Heuristic to determine whether is safe or not to open a file for append
-   * Looks both for dfs.support.append and use reflection to search
-   * for SequenceFile.Writer.syncFs() or FSDataOutputStream.hflush()
-   * @param conf
-   * @return True if append support
-   */
-  public static boolean isAppendSupported(final Configuration conf) {
-    boolean append = conf.getBoolean("dfs.support.append", false);
-    if (append) {
-      try {
-        // TODO: The implementation that comes back when we do a createWriter
-        // may not be using SequenceFile so the below is not a definitive test.
-        // Will do for now (hdfs-200).
-        SequenceFile.Writer.class.getMethod("syncFs", new Class []{});
-        append = true;
-      } catch (SecurityException e) {
-      } catch (NoSuchMethodException e) {
-        append = false;
-      }
-    }
-    if (!append) {
-      // Look for the 0.21, 0.22, new-style append evidence.
-      try {
-        FSDataOutputStream.class.getMethod("hflush", new Class []{});
-        append = true;
-      } catch (NoSuchMethodException e) {
-        append = false;
-      }
-    }
-    return append;
-  }
-
   /**
    * @param conf
    * @return True if this filesystem whose scheme is 'hdfs'.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFullLogReconstruction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFullLogReconstruction.java
index 8c2207cad7f..6640a9846f6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFullLogReconstruction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFullLogReconstruction.java
@@ -46,7 +46,6 @@ public class TestFullLogReconstruction {
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     Configuration c = TEST_UTIL.getConfiguration();
-    c.setBoolean("dfs.support.append", true);
     // quicker heartbeat interval for faster DN death notification
     c.setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
     c.setInt("dfs.heartbeat.interval", 1);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
index 3c10ddc1e92..0217b4187bb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
@@ -248,7 +248,6 @@ public class TestIOFencing {
     Configuration c = TEST_UTIL.getConfiguration();
     // Insert our custom region
     c.setClass(HConstants.REGION_IMPL, regionClass, HRegion.class);
-    c.setBoolean("dfs.support.append", true);
     // Encourage plenty of flushes
     c.setLong("hbase.hregion.memstore.flush.size", 100000);
     c.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, ConstantSizeRegionSplitPolicy.class.getName());
@@ -359,4 +358,4 @@ public class TestIOFencing {
       TEST_UTIL.shutdownMiniCluster();
     }
   }
-}
\ No newline at end of file
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java
index 77d01e2ae78..4feebcc6bbf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java
@@ -89,7 +89,6 @@ public class TestZooKeeper {
     Configuration conf = TEST_UTIL.getConfiguration();
     TEST_UTIL.startMiniDFSCluster(2);
     TEST_UTIL.startMiniZKCluster();
-    conf.setBoolean("dfs.support.append", true);
     conf.setInt(HConstants.ZK_SESSION_TIMEOUT, 1000);
     conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, MockLoadBalancer.class,
         LoadBalancer.class);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
index 320363679cf..126eaa95075 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
@@ -582,8 +582,6 @@ public class TestAdmin2 {
         2 * 1000);
 
     /**** configuration for testLogRollOnDatanodeDeath ****/
-    // make sure log.hflush() calls syncFs() to open a pipeline
-    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
     // lower the namenode & datanode heartbeat so the namenode
     // quickly detects datanode failures
     TEST_UTIL.getConfiguration().setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java
index 8da76a6adfb..3c591f8eb5a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java
@@ -113,7 +113,6 @@ public class TestWALObserver {
         SampleRegionWALObserver.class.getName(), SampleRegionWALObserver.Legacy.class.getName());
     conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
         SampleRegionWALObserver.class.getName());
-    conf.setBoolean("dfs.support.append", true);
     conf.setInt("dfs.client.block.recovery.retries", 2);
 
     TEST_UTIL.startMiniCluster(1);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java
index 2a68276677f..3180c503585 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java
@@ -94,7 +94,6 @@ public class TestBlockReorder {
   public void setUp() throws Exception {
     htu = new HBaseTestingUtility();
     htu.getConfiguration().setInt("dfs.blocksize", 1024);// For the test with multiple blocks
-    htu.getConfiguration().setBoolean("dfs.support.append", true);
     htu.getConfiguration().setInt("dfs.replication", 3);
     htu.startMiniDFSCluster(3,
         new String[]{"/r1", "/r2", "/r3"}, new String[]{host1, host2, host3});
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java
index b973a36959c..1d8a761b170 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java
@@ -62,7 +62,6 @@ public class TestMasterTransitions {
    * @throws Exception
    */
   @BeforeClass public static void beforeAllTests() throws Exception {
-    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
     TEST_UTIL.startMiniCluster(2);
     // Create a table of three families.  This will assign a region.
     TEST_UTIL.createMultiRegionTable(TABLENAME, FAMILIES);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedureFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedureFromClient.java
index bc11e53b0b7..4524a3746a5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedureFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedureFromClient.java
@@ -66,7 +66,6 @@ public class TestDeleteColumnFamilyProcedureFromClient {
    */
   @BeforeClass
   public static void beforeAllTests() throws Exception {
-    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
     TEST_UTIL.startMiniCluster(2);
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java
index a4267a05855..bac57d439bd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java
@@ -87,8 +87,6 @@ public abstract class AbstractTestProtobufLog<W extends Closeable> {
     // Make block sizes small.
     TEST_UTIL.getConfiguration().setInt("dfs.blocksize", 1024 * 1024);
     // needed for testAppendClose()
-    TEST_UTIL.getConfiguration().setBoolean("dfs.support.broken.append", true);
-    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
     // quicker heartbeat interval for faster DN death notification
     TEST_UTIL.getConfiguration().setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
     TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
index a9746395e09..faa539efb18 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
@@ -136,7 +136,6 @@ public abstract class AbstractTestWALReplay {
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     Configuration conf = TEST_UTIL.getConfiguration();
-    conf.setBoolean("dfs.support.append", true);
     // The below config supported by 0.20-append and CDH3b2
     conf.setInt("dfs.client.block.recovery.retries", 2);
     TEST_UTIL.startMiniCluster(3);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java
index 788828a7aca..9851815ac33 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hbase.regionserver.wal;
 
-import static org.junit.Assert.assertTrue;
-
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.NavigableMap;
@@ -47,7 +45,6 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-import org.apache.hadoop.hbase.wal.FSHLogProvider;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.wal.WALKey;
@@ -88,8 +85,6 @@ public class TestLogRollAbort {
     // Increase the amount of time between client retries
     TEST_UTIL.getConfiguration().setLong("hbase.client.pause", 5 * 1000);
 
-    // make sure log.hflush() calls syncFs() to open a pipeline
-    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
     // lower the namenode & datanode heartbeat so the namenode
     // quickly detects datanode failures
     TEST_UTIL.getConfiguration().setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
@@ -145,10 +140,6 @@ public class TestLogRollAbort {
     HRegionServer server = TEST_UTIL.getRSForFirstRegionInTable(tableName);
     WAL log = server.getWAL(null);
 
-    // don't run this test without append support (HDFS-200 & HDFS-142)
-    assertTrue("Need append support for this test",
-        FSUtils.isAppendSupported(TEST_UTIL.getConfiguration()));
-
     Put p = new Put(Bytes.toBytes("row2001"));
     p.addColumn(HConstants.CATALOG_FAMILY, Bytes.toBytes("col"), Bytes.toBytes(2001));
     table.put(p);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
index bf46b03dc89..e4d4c5b3109 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
@@ -72,8 +72,6 @@ public class TestLogRolling extends AbstractTestLogRolling {
     System.setProperty("hbase.tests.use.shortcircuit.reads", "false");
 
     /**** configuration for testLogRollOnDatanodeDeath ****/
-    // make sure log.hflush() calls syncFs() to open a pipeline
-    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
     // lower the namenode & datanode heartbeat so the namenode
     // quickly detects datanode failures
     TEST_UTIL.getConfiguration().setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
@@ -148,10 +146,6 @@ public class TestLogRolling extends AbstractTestLogRolling {
       }
     });
 
-    // don't run this test without append support (HDFS-200 & HDFS-142)
-    assertTrue("Need append support for this test",
-        FSUtils.isAppendSupported(TEST_UTIL.getConfiguration()));
-
     // add up the datanode count, to ensure proper replication when we kill 1
     // This function is synchronous; when it returns, the dfs cluster is active
     // We start 3 servers and then stop 2 to avoid a directory naming conflict
@@ -267,10 +261,6 @@ public class TestLogRolling extends AbstractTestLogRolling {
       }
     });
 
-    // don't run this test without append support (HDFS-200 & HDFS-142)
-    assertTrue("Need append support for this test",
-        FSUtils.isAppendSupported(TEST_UTIL.getConfiguration()));
-
     writeData(table, 1002);
 
     long curTime = System.currentTimeMillis();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java
index f77bafedf42..5b8538be4de 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java
@@ -121,7 +121,6 @@ public class TestMasterReplication {
     baseConfiguration.set("hbase.replication.source.fs.conf.provider",
       TestSourceFSConfigurationProvider.class.getCanonicalName());
     baseConfiguration.set(HConstants.REPLICATION_CLUSTER_ID, "12345");
-    baseConfiguration.setBoolean("dfs.support.append", true);
     baseConfiguration.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
     baseConfiguration.setStrings(
         CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java
index 4b46fdda8b5..d173d8345ea 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java
@@ -94,7 +94,6 @@ public class TestMultiSlaveReplication {
     conf1.setLong("replication.source.sleepforretries", 100);
     conf1.setInt("hbase.regionserver.maxlogs", 10);
     conf1.setLong("hbase.master.logcleaner.ttl", 10);
-    conf1.setBoolean("dfs.support.append", true);
     conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
     conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
         "org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
index 9c314f4eb7d..7cc2e2bbb84 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
@@ -102,7 +102,6 @@ public class TestPerTableCFReplication {
     conf1.setLong("replication.source.sleepforretries", 100);
     conf1.setInt("hbase.regionserver.maxlogs", 10);
     conf1.setLong("hbase.master.logcleaner.ttl", 10);
-    conf1.setBoolean("dfs.support.append", true);
     conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
     conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
         "org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
index 526bc505811..b3739fb2058 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
@@ -97,7 +97,6 @@ public class TestReplicationBase {
     conf1.setLong("hbase.master.logcleaner.ttl", 10);
     conf1.setInt("zookeeper.recovery.retry", 1);
     conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
-    conf1.setBoolean("dfs.support.append", true);
     conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
     conf1.setInt("replication.stats.thread.period.seconds", 5);
     conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
@@ -119,7 +118,6 @@ public class TestReplicationBase {
     conf2 = HBaseConfiguration.create(conf1);
     conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
     conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
-    conf2.setBoolean("dfs.support.append", true);
     conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
 
     utility2 = new HBaseTestingUtility(conf2);
@@ -175,4 +173,4 @@ public class TestReplicationBase {
     utility2.shutdownMiniCluster();
     utility1.shutdownMiniCluster();
   }
-}
\ No newline at end of file
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
index b2a9611f86f..af0e3577111 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
@@ -101,7 +101,6 @@ public class TestReplicationWithTags {
     conf1.setLong("hbase.master.logcleaner.ttl", 10);
     conf1.setInt("zookeeper.recovery.retry", 1);
     conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
-    conf1.setBoolean("dfs.support.append", true);
     conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
     conf1.setInt("replication.stats.thread.period.seconds", 5);
     conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
@@ -123,7 +122,6 @@ public class TestReplicationWithTags {
     conf2.setInt("hfile.format.version", 3);
     conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
     conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
-    conf2.setBoolean("dfs.support.append", true);
     conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
     conf2.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
     conf2.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java
index 01e78711057..049ca8e64d3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java
@@ -115,7 +115,6 @@ public class TestReplicationSink {
    */
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
-    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
     TEST_UTIL.getConfiguration().set("hbase.replication.source.fs.conf.provider",
       TestSourceFSConfigurationProvider.class.getCanonicalName());
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java
index 573fc20cb10..fafa500e6a2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java
@@ -88,7 +88,6 @@ public class TestVisibilityLabelReplicationWithExpAsString extends TestVisibilit
     conf.setLong("hbase.master.logcleaner.ttl", 10);
     conf.setInt("zookeeper.recovery.retry", 1);
     conf.setInt("zookeeper.recovery.retry.intervalmill", 10);
-    conf.setBoolean("dfs.support.append", true);
     conf.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
     conf.setInt("replication.stats.thread.period.seconds", 5);
     conf.setBoolean("hbase.tests.use.shortcircuit.reads", false);
@@ -121,7 +120,6 @@ public class TestVisibilityLabelReplicationWithExpAsString extends TestVisibilit
     conf1.setInt("hfile.format.version", 3);
     conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
     conf1.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
-    conf1.setBoolean("dfs.support.append", true);
     conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
     conf1.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
     conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java
index 65a20efd171..a62a28194e0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java
@@ -137,7 +137,6 @@ public class TestVisibilityLabelsReplication {
     conf.setLong("hbase.master.logcleaner.ttl", 10);
     conf.setInt("zookeeper.recovery.retry", 1);
     conf.setInt("zookeeper.recovery.retry.intervalmill", 10);
-    conf.setBoolean("dfs.support.append", true);
     conf.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
     conf.setInt("replication.stats.thread.period.seconds", 5);
     conf.setBoolean("hbase.tests.use.shortcircuit.reads", false);
@@ -169,7 +168,6 @@ public class TestVisibilityLabelsReplication {
     conf1.setInt("hfile.format.version", 3);
     conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
     conf1.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
-    conf1.setBoolean("dfs.support.append", true);
     conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
     conf1.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
     conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
index 7e5dc7199dc..0d8e2ef4b4a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
@@ -109,14 +109,11 @@ public class TestFSUtils {
   @Test public void testIsHDFS() throws Exception {
     HBaseTestingUtility htu = new HBaseTestingUtility();
-    htu.getConfiguration().setBoolean("dfs.support.append", false);
     assertFalse(FSUtils.isHDFS(htu.getConfiguration()));
-    htu.getConfiguration().setBoolean("dfs.support.append", true);
     MiniDFSCluster cluster = null;
     try {
       cluster = htu.startMiniDFSCluster(1);
       assertTrue(FSUtils.isHDFS(htu.getConfiguration()));
-      assertTrue(FSUtils.isAppendSupported(htu.getConfiguration()));
     } finally {
       if (cluster != null) cluster.shutdown();
     }
   }
@@ -486,7 +483,7 @@
       res = e;
     }
     assertTrue("Error reading beyond file boundary.", res != null);
-    
+
     stm.close();
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java
index fb1afa677c2..74445cff794 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java
@@ -124,8 +124,6 @@ public class TestWALFactory {
     // Make block sizes small.
     TEST_UTIL.getConfiguration().setInt("dfs.blocksize", 1024 * 1024);
     // needed for testAppendClose()
-    TEST_UTIL.getConfiguration().setBoolean("dfs.support.broken.append", true);
-    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
     // quicker heartbeat interval for faster DN death notification
     TEST_UTIL.getConfiguration().setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
     TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
index 467fcb475c3..3136416576c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
@@ -151,8 +151,6 @@ public class TestWALSplit {
     conf = TEST_UTIL.getConfiguration();
     conf.setClass("hbase.regionserver.hlog.writer.impl",
         InstrumentedLogWriter.class, Writer.class);
-    conf.setBoolean("dfs.support.broken.append", true);
-    conf.setBoolean("dfs.support.append", true);
     // This is how you turn off shortcircuit read currently.  TODO: Fix.  Should read config.
     System.setProperty("hbase.tests.use.shortcircuit.reads", "false");
     // Create fake maping user to group and set it to the conf.
@@ -1369,4 +1367,4 @@ public class TestWALSplit {
     in2.close();
     return true;
   }
-}
\ No newline at end of file
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java
index 3f474b451f0..73263279db9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java
@@ -72,7 +72,6 @@ public class TestZooKeeperACL {
     System.setProperty("zookeeper.authProvider.1",
         "org.apache.zookeeper.server.auth.SASLAuthenticationProvider");
 
-    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
     TEST_UTIL.getConfiguration().setInt("hbase.zookeeper.property.maxClientCnxns", 1000);
 
     // If Hadoop is missing HADOOP-7070 the cluster will fail to start due to
diff --git a/src/main/asciidoc/_chapters/configuration.adoc b/src/main/asciidoc/_chapters/configuration.adoc
index 3738dd918b4..8dc3e8a1fcc 100644
--- a/src/main/asciidoc/_chapters/configuration.adoc
+++ b/src/main/asciidoc/_chapters/configuration.adoc
@@ -337,26 +337,7 @@ Do not move to Apache HBase 0.96.x if you cannot upgrade your Hadoop. See link:h
 [[hadoop.older.versions]]
 ==== Hadoop versions 0.20.x - 1.x
 
-HBase will lose data unless it is running on an HDFS that has a durable `sync` implementation.
-DO NOT use Hadoop 0.20.2, Hadoop 0.20.203.0, and Hadoop 0.20.204.0 which DO NOT have this attribute.
-Currently only Hadoop versions 0.20.205.x or any release in excess of this version -- this includes hadoop-1.0.0 -- have a working, durable sync.
-The Cloudera blog post link:https://blog.cloudera.com/blog/2012/01/an-update-on-apache-hadoop-1-0/[An update on Apache Hadoop 1.0] by Charles Zedlweski has a nice exposition on how all the Hadoop versions relate.
-It's worth checking out if you are having trouble making sense of the Hadoop version morass.
-
-Sync has to be explicitly enabled by setting `dfs.support.append` equal to true on both the client side -- in _hbase-site.xml_ -- and on the serverside in _hdfs-site.xml_ (The sync facility HBase needs is a subset of the append code path).
-
-[source,xml]
-----
-<property>
-  <name>dfs.support.append</name>
-  <value>true</value>
-</property>
-----
-
-You will have to restart your cluster after making this edit.
-Ignore the chicken-little comment you'll find in the _hdfs-default.xml_ in the description for the `dfs.support.append` configuration.
+DO NOT use Hadoop versions older than 2.2.0 for HBase versions greater than 1.0. Check release documentation if you are using an older version of HBase for Hadoop related information.
 
 [[hadoop.security]]
 ==== Apache HBase on Secure Hadoop
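
The heuristic deleted from FSUtils above existed only because pre-2.x HDFS releases could not be trusted to provide a durable sync, so HBase probed by reflection for `SequenceFile.Writer.syncFs()` or `FSDataOutputStream.hflush()`. On the Hadoop 2.2.0+ baseline this patch assumes, every `FSDataOutputStream` exposes `hflush()` and `hsync()` directly, so neither the reflection probe nor the `dfs.support.append` flag is needed. Below is a minimal sketch of the capability HBase now takes for granted. It is illustrative only and not part of the patch: the class name and file path are hypothetical, and it assumes a filesystem (HDFS or local) reachable through the default `Configuration`.

[source,java]
----
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DurableSyncSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical path, used only for illustration.
    try (FSDataOutputStream out = fs.create(new Path("/tmp/durable-sync-sketch"))) {
      out.write("wal entry".getBytes("UTF-8"));
      // hflush() pushes buffered bytes through the datanode pipeline so new
      // readers can see them; on Hadoop 2.2.0+ this method always exists,
      // which is why the reflection-based isAppendSupported() check could go.
      out.hflush();
      // hsync() additionally asks the datanodes to persist the bytes to disk.
      out.hsync();
    }
  }
}
----

`hflush()` provides the visibility guarantee the old `syncFs()` probe was looking for, and `hsync()` is the stronger on-disk durability variant; with Hadoop 2.2.0+ as the floor, HBase's WAL can rely on both unconditionally.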