HBASE-16474 Remove dfs.support.append related code and documentation

Enis Soztutar 2016-08-24 03:41:54 -07:00
parent c11923d8aa
commit cbfd6eecbd
26 changed files with 6 additions and 109 deletions
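Context for the removal: `dfs.support.append` and the reflection probe removed below date from the Hadoop 0.20-append era. On the Hadoop 2.x releases the updated documentation requires (2.2.0 or newer), `FSDataOutputStream` always provides `hflush()` and `hsync()`, so WAL durability no longer depends on that flag. A minimal sketch of relying on the API directly, assuming a reachable HDFS and an illustrative path (this code is not part of the commit):

[source,java]
----
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HflushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Resolves to HDFS when fs.defaultFS points at a namenode.
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/hflush-sketch");   // illustrative path, not from the commit
    try (FSDataOutputStream out = fs.create(path)) {
      out.writeBytes("edit 1\n");
      out.hflush();   // flush so new readers can see the data; always available on Hadoop 2.x
      out.hsync();    // additionally ask the datanodes to persist the data to disk
    }
  }
}
----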

View File

@@ -83,7 +83,6 @@ import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSHedgedReadMetrics;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Progressable;
@@ -1267,6 +1266,7 @@ public abstract class FSUtils {
super(fs, HConstants.HBASE_NON_TABLE_DIRS);
}
@Override
protected boolean isValidName(final String name) {
if (!super.isValidName(name))
return false;
@@ -1281,39 +1281,6 @@ public abstract class FSUtils {
}
}
/**
 * Heuristic to determine whether it is safe to open a file for append.
 * Checks both dfs.support.append and, via reflection, the presence of
 * SequenceFile.Writer.syncFs() or FSDataOutputStream.hflush().
 * @param conf configuration to check
 * @return True if append is supported
 */
public static boolean isAppendSupported(final Configuration conf) {
  boolean append = conf.getBoolean("dfs.support.append", false);
  if (append) {
    try {
      // TODO: The implementation that comes back when we do a createWriter
      // may not be using SequenceFile so the below is not a definitive test.
      // Will do for now (hdfs-200).
      SequenceFile.Writer.class.getMethod("syncFs", new Class<?> []{});
      append = true;
    } catch (SecurityException e) {
    } catch (NoSuchMethodException e) {
      append = false;
    }
  }
  if (!append) {
    // Look for the 0.21, 0.22, new-style append evidence.
    try {
      FSDataOutputStream.class.getMethod("hflush", new Class<?> []{});
      append = true;
    } catch (NoSuchMethodException e) {
      append = false;
    }
  }
  return append;
}
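For context on why the heuristic above could be dropped: on Hadoop 2.x, `FSDataOutputStream` implements `org.apache.hadoop.fs.Syncable`, so the reflection probe for `hflush()` always succeeds and the method effectively always returns true. A hypothetical reduction, shown only for illustration and not part of this commit:

[source,java]
----
// Illustration only, not code from HBASE-16474: on Hadoop 2.x the probe above
// always finds hflush(), so the whole check collapses to a constant.
public static boolean isAppendSupported(final Configuration conf) {
  return org.apache.hadoop.fs.Syncable.class
      .isAssignableFrom(org.apache.hadoop.fs.FSDataOutputStream.class); // always true on 2.x
}
----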
/**
* @param conf
* @return True if this is a filesystem whose scheme is 'hdfs'.

View File

@@ -46,7 +46,6 @@ public class TestFullLogReconstruction {
@BeforeClass
public static void setUpBeforeClass() throws Exception {
Configuration c = TEST_UTIL.getConfiguration();
c.setBoolean("dfs.support.append", true);
// quicker heartbeat interval for faster DN death notification
c.setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
c.setInt("dfs.heartbeat.interval", 1);

View File

@@ -248,7 +248,6 @@ public class TestIOFencing {
Configuration c = TEST_UTIL.getConfiguration();
// Insert our custom region
c.setClass(HConstants.REGION_IMPL, regionClass, HRegion.class);
c.setBoolean("dfs.support.append", true);
// Encourage plenty of flushes
c.setLong("hbase.hregion.memstore.flush.size", 100000);
c.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, ConstantSizeRegionSplitPolicy.class.getName());
@@ -359,4 +358,4 @@ public class TestIOFencing {
TEST_UTIL.shutdownMiniCluster();
}
}
}
}

View File

@@ -89,7 +89,6 @@ public class TestZooKeeper {
Configuration conf = TEST_UTIL.getConfiguration();
TEST_UTIL.startMiniDFSCluster(2);
TEST_UTIL.startMiniZKCluster();
conf.setBoolean("dfs.support.append", true);
conf.setInt(HConstants.ZK_SESSION_TIMEOUT, 1000);
conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, MockLoadBalancer.class,
LoadBalancer.class);

View File

@@ -582,8 +582,6 @@ public class TestAdmin2 {
2 * 1000);
/**** configuration for testLogRollOnDatanodeDeath ****/
// make sure log.hflush() calls syncFs() to open a pipeline
TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
// lower the namenode & datanode heartbeat so the namenode
// quickly detects datanode failures
TEST_UTIL.getConfiguration().setInt("dfs.namenode.heartbeat.recheck-interval", 5000);

View File

@@ -113,7 +113,6 @@ public class TestWALObserver {
SampleRegionWALObserver.class.getName(), SampleRegionWALObserver.Legacy.class.getName());
conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
SampleRegionWALObserver.class.getName());
conf.setBoolean("dfs.support.append", true);
conf.setInt("dfs.client.block.recovery.retries", 2);
TEST_UTIL.startMiniCluster(1);

View File

@@ -94,7 +94,6 @@ public class TestBlockReorder {
public void setUp() throws Exception {
htu = new HBaseTestingUtility();
htu.getConfiguration().setInt("dfs.blocksize", 1024);// For the test with multiple blocks
htu.getConfiguration().setBoolean("dfs.support.append", true);
htu.getConfiguration().setInt("dfs.replication", 3);
htu.startMiniDFSCluster(3,
new String[]{"/r1", "/r2", "/r3"}, new String[]{host1, host2, host3});

View File

@@ -62,7 +62,6 @@ public class TestMasterTransitions {
* @throws Exception
*/
@BeforeClass public static void beforeAllTests() throws Exception {
TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
TEST_UTIL.startMiniCluster(2);
// Create a table of three families. This will assign a region.
TEST_UTIL.createMultiRegionTable(TABLENAME, FAMILIES);

View File

@@ -66,7 +66,6 @@ public class TestDeleteColumnFamilyProcedureFromClient {
*/
@BeforeClass
public static void beforeAllTests() throws Exception {
TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
TEST_UTIL.startMiniCluster(2);
}

View File

@@ -87,8 +87,6 @@ public abstract class AbstractTestProtobufLog<W extends Closeable> {
// Make block sizes small.
TEST_UTIL.getConfiguration().setInt("dfs.blocksize", 1024 * 1024);
// needed for testAppendClose()
TEST_UTIL.getConfiguration().setBoolean("dfs.support.broken.append", true);
TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
// quicker heartbeat interval for faster DN death notification
TEST_UTIL.getConfiguration().setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);

View File

@@ -136,7 +136,6 @@ public abstract class AbstractTestWALReplay {
@BeforeClass
public static void setUpBeforeClass() throws Exception {
Configuration conf = TEST_UTIL.getConfiguration();
conf.setBoolean("dfs.support.append", true);
// The below config is supported by 0.20-append and CDH3b2
conf.setInt("dfs.client.block.recovery.retries", 2);
TEST_UTIL.startMiniCluster(3);

View File

@@ -17,8 +17,6 @@
*/
package org.apache.hadoop.hbase.regionserver.wal;
import static org.junit.Assert.assertTrue;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.NavigableMap;
@@ -47,7 +45,6 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
import org.apache.hadoop.hbase.wal.FSHLogProvider;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.wal.WALKey;
@@ -88,8 +85,6 @@ public class TestLogRollAbort {
// Increase the amount of time between client retries
TEST_UTIL.getConfiguration().setLong("hbase.client.pause", 5 * 1000);
// make sure log.hflush() calls syncFs() to open a pipeline
TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
// lower the namenode & datanode heartbeat so the namenode
// quickly detects datanode failures
TEST_UTIL.getConfiguration().setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
@@ -145,10 +140,6 @@ public class TestLogRollAbort {
HRegionServer server = TEST_UTIL.getRSForFirstRegionInTable(tableName);
WAL log = server.getWAL(null);
// don't run this test without append support (HDFS-200 & HDFS-142)
assertTrue("Need append support for this test",
FSUtils.isAppendSupported(TEST_UTIL.getConfiguration()));
Put p = new Put(Bytes.toBytes("row2001"));
p.addColumn(HConstants.CATALOG_FAMILY, Bytes.toBytes("col"), Bytes.toBytes(2001));
table.put(p);

View File

@@ -72,8 +72,6 @@ public class TestLogRolling extends AbstractTestLogRolling {
System.setProperty("hbase.tests.use.shortcircuit.reads", "false");
/**** configuration for testLogRollOnDatanodeDeath ****/
// make sure log.hflush() calls syncFs() to open a pipeline
TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
// lower the namenode & datanode heartbeat so the namenode
// quickly detects datanode failures
TEST_UTIL.getConfiguration().setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
@@ -148,10 +146,6 @@ public class TestLogRolling extends AbstractTestLogRolling {
}
});
// don't run this test without append support (HDFS-200 & HDFS-142)
assertTrue("Need append support for this test",
FSUtils.isAppendSupported(TEST_UTIL.getConfiguration()));
// add up the datanode count, to ensure proper replication when we kill 1
// This function is synchronous; when it returns, the dfs cluster is active
// We start 3 servers and then stop 2 to avoid a directory naming conflict
@@ -267,10 +261,6 @@ public class TestLogRolling extends AbstractTestLogRolling {
}
});
// don't run this test without append support (HDFS-200 & HDFS-142)
assertTrue("Need append support for this test",
FSUtils.isAppendSupported(TEST_UTIL.getConfiguration()));
writeData(table, 1002);
long curTime = System.currentTimeMillis();

View File

@@ -121,7 +121,6 @@ public class TestMasterReplication {
baseConfiguration.set("hbase.replication.source.fs.conf.provider",
TestSourceFSConfigurationProvider.class.getCanonicalName());
baseConfiguration.set(HConstants.REPLICATION_CLUSTER_ID, "12345");
baseConfiguration.setBoolean("dfs.support.append", true);
baseConfiguration.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
baseConfiguration.setStrings(
CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,

View File

@@ -94,7 +94,6 @@ public class TestMultiSlaveReplication {
conf1.setLong("replication.source.sleepforretries", 100);
conf1.setInt("hbase.regionserver.maxlogs", 10);
conf1.setLong("hbase.master.logcleaner.ttl", 10);
conf1.setBoolean("dfs.support.append", true);
conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
"org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter");

View File

@@ -102,7 +102,6 @@ public class TestPerTableCFReplication {
conf1.setLong("replication.source.sleepforretries", 100);
conf1.setInt("hbase.regionserver.maxlogs", 10);
conf1.setLong("hbase.master.logcleaner.ttl", 10);
conf1.setBoolean("dfs.support.append", true);
conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
"org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter");

View File

@@ -97,7 +97,6 @@ public class TestReplicationBase {
conf1.setLong("hbase.master.logcleaner.ttl", 10);
conf1.setInt("zookeeper.recovery.retry", 1);
conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
conf1.setBoolean("dfs.support.append", true);
conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
conf1.setInt("replication.stats.thread.period.seconds", 5);
conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
@@ -119,7 +118,6 @@ public class TestReplicationBase {
conf2 = HBaseConfiguration.create(conf1);
conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
conf2.setBoolean("dfs.support.append", true);
conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
utility2 = new HBaseTestingUtility(conf2);
@@ -175,4 +173,4 @@ public class TestReplicationBase {
utility2.shutdownMiniCluster();
utility1.shutdownMiniCluster();
}
}
}

View File

@@ -101,7 +101,6 @@ public class TestReplicationWithTags {
conf1.setLong("hbase.master.logcleaner.ttl", 10);
conf1.setInt("zookeeper.recovery.retry", 1);
conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
conf1.setBoolean("dfs.support.append", true);
conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
conf1.setInt("replication.stats.thread.period.seconds", 5);
conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
@@ -123,7 +122,6 @@ public class TestReplicationWithTags {
conf2.setInt("hfile.format.version", 3);
conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
conf2.setBoolean("dfs.support.append", true);
conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
conf2.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
conf2.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,

View File

@@ -115,7 +115,6 @@ public class TestReplicationSink {
*/
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
TEST_UTIL.getConfiguration().set("hbase.replication.source.fs.conf.provider",
TestSourceFSConfigurationProvider.class.getCanonicalName());

View File

@@ -88,7 +88,6 @@ public class TestVisibilityLabelReplicationWithExpAsString extends TestVisibilit
conf.setLong("hbase.master.logcleaner.ttl", 10);
conf.setInt("zookeeper.recovery.retry", 1);
conf.setInt("zookeeper.recovery.retry.intervalmill", 10);
conf.setBoolean("dfs.support.append", true);
conf.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
conf.setInt("replication.stats.thread.period.seconds", 5);
conf.setBoolean("hbase.tests.use.shortcircuit.reads", false);
@@ -121,7 +120,6 @@ public class TestVisibilityLabelReplicationWithExpAsString extends TestVisibilit
conf1.setInt("hfile.format.version", 3);
conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
conf1.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
conf1.setBoolean("dfs.support.append", true);
conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
conf1.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,

View File

@@ -137,7 +137,6 @@ public class TestVisibilityLabelsReplication {
conf.setLong("hbase.master.logcleaner.ttl", 10);
conf.setInt("zookeeper.recovery.retry", 1);
conf.setInt("zookeeper.recovery.retry.intervalmill", 10);
conf.setBoolean("dfs.support.append", true);
conf.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
conf.setInt("replication.stats.thread.period.seconds", 5);
conf.setBoolean("hbase.tests.use.shortcircuit.reads", false);
@@ -169,7 +168,6 @@ public class TestVisibilityLabelsReplication {
conf1.setInt("hfile.format.version", 3);
conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
conf1.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
conf1.setBoolean("dfs.support.append", true);
conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
conf1.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,

View File

@@ -109,14 +109,11 @@ public class TestFSUtils {
@Test public void testIsHDFS() throws Exception {
HBaseTestingUtility htu = new HBaseTestingUtility();
htu.getConfiguration().setBoolean("dfs.support.append", false);
assertFalse(FSUtils.isHDFS(htu.getConfiguration()));
htu.getConfiguration().setBoolean("dfs.support.append", true);
MiniDFSCluster cluster = null;
try {
cluster = htu.startMiniDFSCluster(1);
assertTrue(FSUtils.isHDFS(htu.getConfiguration()));
assertTrue(FSUtils.isAppendSupported(htu.getConfiguration()));
} finally {
if (cluster != null) cluster.shutdown();
}
@@ -486,7 +483,7 @@ public class TestFSUtils {
res = e;
}
assertTrue("Error reading beyond file boundary.", res != null);
stm.close();
}

View File

@@ -124,8 +124,6 @@ public class TestWALFactory {
// Make block sizes small.
TEST_UTIL.getConfiguration().setInt("dfs.blocksize", 1024 * 1024);
// needed for testAppendClose()
TEST_UTIL.getConfiguration().setBoolean("dfs.support.broken.append", true);
TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
// quicker heartbeat interval for faster DN death notification
TEST_UTIL.getConfiguration().setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);

View File

@@ -151,8 +151,6 @@ public class TestWALSplit {
conf = TEST_UTIL.getConfiguration();
conf.setClass("hbase.regionserver.hlog.writer.impl",
InstrumentedLogWriter.class, Writer.class);
conf.setBoolean("dfs.support.broken.append", true);
conf.setBoolean("dfs.support.append", true);
// This is how you turn off shortcircuit read currently. TODO: Fix. Should read config.
System.setProperty("hbase.tests.use.shortcircuit.reads", "false");
// Create fake mapping of user to group and set it in the conf.
@@ -1369,4 +1367,4 @@ public class TestWALSplit {
in2.close();
return true;
}
}
}

View File

@@ -72,7 +72,6 @@ public class TestZooKeeperACL {
System.setProperty("zookeeper.authProvider.1",
"org.apache.zookeeper.server.auth.SASLAuthenticationProvider");
TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
TEST_UTIL.getConfiguration().setInt("hbase.zookeeper.property.maxClientCnxns", 1000);
// If Hadoop is missing HADOOP-7070 the cluster will fail to start due to

View File

@@ -337,26 +337,7 @@ Do not move to Apache HBase 0.96.x if you cannot upgrade your Hadoop. See link:h
[[hadoop.older.versions]]
==== Hadoop versions 0.20.x - 1.x
HBase will lose data unless it is running on an HDFS that has a durable `sync` implementation.
DO NOT use Hadoop 0.20.2, Hadoop 0.20.203.0, or Hadoop 0.20.204.0, which DO NOT have this attribute.
Currently only Hadoop versions 0.20.205.x or any release in excess of this version -- this includes hadoop-1.0.0 -- have a working, durable sync.
The Cloudera blog post link:https://blog.cloudera.com/blog/2012/01/an-update-on-apache-hadoop-1-0/[An
update on Apache Hadoop 1.0] by Charles Zedlewski has a nice exposition on how all the Hadoop versions relate.
It's worth checking out if you are having trouble making sense of the Hadoop version morass.
Sync has to be explicitly enabled by setting `dfs.support.append` to true on both the client side -- in _hbase-site.xml_ -- and on the server side in _hdfs-site.xml_ (the sync facility HBase needs is a subset of the append code path).
[source,xml]
----
<property>
<name>dfs.support.append</name>
<value>true</value>
</property>
----
You will have to restart your cluster after making this edit.
Ignore the chicken-little comment you'll find in the _hdfs-default.xml_ in the description for the `dfs.support.append` configuration.
DO NOT use Hadoop versions older than 2.2.0 for HBase versions greater than 1.0. If you are using an older version of HBase, check its release documentation for Hadoop-related information.
[[hadoop.security]]
==== Apache HBase on Secure Hadoop