HBASE-20691 Change the default WAL storage policy back to "NONE"

This reverts commit 564c193d61 and adds more documentation
about why we chose "NONE" as the default.
Yu Li 2018-06-07 14:12:55 +08:00
parent ee3990e42c
commit ec8947f226
5 changed files with 105 additions and 41 deletions

HConstants.java

@@ -1077,7 +1077,13 @@ public final class HConstants {
* Valid values are: HOT, COLD, WARM, ALL_SSD, ONE_SSD, LAZY_PERSIST
* See http://hadoop.apache.org/docs/r2.7.3/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html */
public static final String WAL_STORAGE_POLICY = "hbase.wal.storage.policy";
- public static final String DEFAULT_WAL_STORAGE_POLICY = "HOT";
+ /**
+  * "NONE" is not a valid storage policy and means we defer the policy to HDFS. @see
+  * <a href="https://issues.apache.org/jira/browse/HBASE-20691">HBASE-20691</a>
+  */
+ public static final String DEFER_TO_HDFS_STORAGE_POLICY = "NONE";
+ /** By default we defer the WAL storage policy to HDFS */
+ public static final String DEFAULT_WAL_STORAGE_POLICY = DEFER_TO_HDFS_STORAGE_POLICY;
/** Region in Transition metrics threshold time */
public static final String METRICS_RIT_STUCK_WARNING_THRESHOLD =
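
A minimal sketch (not part of this commit; the WalPolicyDefaultDemo class is hypothetical) of how the new default resolves for a caller, and how an operator can still opt back into the old behavior:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class WalPolicyDefaultDemo {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // With no explicit hbase.wal.storage.policy setting this resolves to "NONE",
    // i.e. HBase will not call the setStoragePolicy API and HDFS keeps its own default.
    String policy = conf.get(HConstants.WAL_STORAGE_POLICY,
        HConstants.DEFAULT_WAL_STORAGE_POLICY);
    System.out.println("Effective WAL storage policy: " + policy);
    // Operators who want the pre-revert behavior can opt back in explicitly,
    // here or in hbase-site.xml:
    conf.set(HConstants.WAL_STORAGE_POLICY, "HOT");
  }
}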

CommonFSUtils.java

@@ -454,36 +454,6 @@ public abstract class CommonFSUtils {
new Path(namespace)));
}
- /**
-  * Sets storage policy for given path according to config setting.
-  * If the passed path is a directory, we'll set the storage policy for all files
-  * created in the future in said directory. Note that this change in storage
-  * policy takes place at the FileSystem level; it will persist beyond this RS's lifecycle.
-  * If we're running on a FileSystem implementation that doesn't support the given storage policy
-  * (or storage policies at all), then we'll issue a log message and continue.
-  *
-  * See http://hadoop.apache.org/docs/r2.6.0/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html
-  *
-  * @param fs We only do anything if it implements a setStoragePolicy method
-  * @param conf used to look up storage policy with given key; not modified.
-  * @param path the Path whose storage policy is to be set
-  * @param policyKey Key to use pulling a policy from Configuration:
-  *   e.g. HConstants.WAL_STORAGE_POLICY (hbase.wal.storage.policy).
-  * @param defaultPolicy if the configured policy is equal to this policy name, we will skip
-  *   telling the FileSystem to set a storage policy.
-  */
- public static void setStoragePolicy(final FileSystem fs, final Configuration conf,
-     final Path path, final String policyKey, final String defaultPolicy) {
-   String storagePolicy = conf.get(policyKey, defaultPolicy).toUpperCase(Locale.ROOT);
-   if (storagePolicy.equals(defaultPolicy)) {
-     if (LOG.isTraceEnabled()) {
-       LOG.trace("default policy of " + defaultPolicy + " requested, exiting early.");
-     }
-     return;
-   }
-   setStoragePolicy(fs, path, storagePolicy);
- }
// this mapping means that under a federated FileSystem implementation, we'll
// only log the first failure from any of the underlying FileSystems at WARN and all others
// will be at DEBUG.
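
The mapping described above is a log-once-per-FileSystem idiom; a standalone sketch of the same pattern (all names hypothetical, with a String key standing in for a FileSystem):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class WarnOnceDemo {
  // First failure for a given key logs at WARN; subsequent ones drop to DEBUG.
  private static final Map<String, Boolean> warningMap = new ConcurrentHashMap<>();

  static void warnOnce(String fsKey, String msg) {
    if (warningMap.putIfAbsent(fsKey, Boolean.TRUE) == null) {
      System.err.println("WARN  " + msg);  // first failure from this key
    } else {
      System.out.println("DEBUG " + msg);  // already warned once for this key
    }
  }
}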
@@ -508,33 +478,63 @@ public abstract class CommonFSUtils {
*/
public static void setStoragePolicy(final FileSystem fs, final Path path,
final String storagePolicy) {
+   try {
+     setStoragePolicy(fs, path, storagePolicy, false);
+   } catch (IOException e) {
+     // should never arrive here
+     LOG.warn("We have chosen not to throw exceptions here, but one was unexpectedly thrown", e);
+   }
+ }
+ static void setStoragePolicy(final FileSystem fs, final Path path, final String storagePolicy,
+     boolean throwException) throws IOException {
if (storagePolicy == null) {
if (LOG.isTraceEnabled()) {
LOG.trace("We were passed a null storagePolicy, exiting early.");
}
return;
}
-   final String trimmedStoragePolicy = storagePolicy.trim();
+   String trimmedStoragePolicy = storagePolicy.trim();
if (trimmedStoragePolicy.isEmpty()) {
if (LOG.isTraceEnabled()) {
LOG.trace("We were passed an empty storagePolicy, exiting early.");
}
return;
-   }
+   } else {
+     trimmedStoragePolicy = trimmedStoragePolicy.toUpperCase(Locale.ROOT);
+   }
+   if (trimmedStoragePolicy.equals(HConstants.DEFER_TO_HDFS_STORAGE_POLICY)) {
+     if (LOG.isTraceEnabled()) {
+       LOG.trace("We were passed the defer-to-hdfs policy {}, exiting early.",
+         trimmedStoragePolicy);
+     }
+     return;
+   }
+   try {
+     invokeSetStoragePolicy(fs, path, trimmedStoragePolicy);
+   } catch (IOException e) {
+     if (LOG.isTraceEnabled()) {
+       LOG.trace("Failed to invoke set storage policy API on FS", e);
+     }
+     if (throwException) {
+       throw e;
+     }
+   }
-   invokeSetStoragePolicy(fs, path, trimmedStoragePolicy);
}
/*
* All args have been checked and are good. Run the setStoragePolicy invocation.
*/
private static void invokeSetStoragePolicy(final FileSystem fs, final Path path,
-     final String storagePolicy) {
+     final String storagePolicy) throws IOException {
Method m = null;
+   Exception toThrow = null;
try {
m = fs.getClass().getDeclaredMethod("setStoragePolicy",
new Class<?>[] { Path.class, String.class });
m.setAccessible(true);
} catch (NoSuchMethodException e) {
+     toThrow = e;
final String msg = "FileSystem doesn't support setStoragePolicy; HDFS-6584, HDFS-9345 " +
"not available. This is normal and expected on earlier Hadoop versions.";
if (!warningMap.containsKey(fs)) {
@@ -545,6 +545,7 @@ public abstract class CommonFSUtils {
}
m = null;
} catch (SecurityException e) {
+     toThrow = e;
final String msg = "No access to setStoragePolicy on FileSystem from the SecurityManager; " +
"HDFS-6584, HDFS-9345 not available. This is unusual and probably warrants an email " +
"to the user@hbase mailing list. Please be sure to include a link to your configs, and " +
@@ -565,6 +566,7 @@ public abstract class CommonFSUtils {
LOG.debug("Set storagePolicy=" + storagePolicy + " for path=" + path);
}
} catch (Exception e) {
+     toThrow = e;
// This swallows FNFE, should we be throwing it? seems more likely to indicate dev
// misuse than a runtime problem with HDFS.
if (!warningMap.containsKey(fs)) {
@@ -603,6 +605,9 @@ public abstract class CommonFSUtils {
}
}
}
+   if (toThrow != null) {
+     throw new IOException(toThrow);
+   }
}
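
A short usage sketch (assuming a reachable HDFS; the DemoStoragePolicy class is hypothetical) of the contract the reworked public method keeps: null, empty, and "NONE" policies return early, anything else is attempted best-effort with failures logged rather than thrown:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.CommonFSUtils;

public class DemoStoragePolicy {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path walDir = new Path("/hbase/WALs");
    // The defer-to-HDFS sentinel exits early; the FileSystem is never touched.
    CommonFSUtils.setStoragePolicy(fs, walDir, HConstants.DEFER_TO_HDFS_STORAGE_POLICY);
    // A real policy is attempted; on a FileSystem without setStoragePolicy support
    // this logs a warning and continues instead of throwing.
    CommonFSUtils.setStoragePolicy(fs, walDir, "ONE_SSD");
  }
}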
/**

WALProcedureStore.java

@@ -216,8 +216,9 @@ public class WALProcedureStore extends ProcedureStoreBase {
}
}
// Now that it exists, set the log policy
-     CommonFSUtils.setStoragePolicy(fs, conf, walDir, HConstants.WAL_STORAGE_POLICY,
-       HConstants.DEFAULT_WAL_STORAGE_POLICY);
+     String storagePolicy =
+       conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY);
+     CommonFSUtils.setStoragePolicy(fs, walDir, storagePolicy);
// Create archive dir up front. Rename won't work w/o it up on HDFS.
if (this.walArchiveDir != null && !this.fs.exists(this.walArchiveDir)) {

AbstractFSWAL.java

@@ -363,8 +363,9 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
}
// Now that it exists, set the storage policy for the entire directory of wal files related to
// this FSHLog instance
-     CommonFSUtils.setStoragePolicy(fs, conf, this.walDir, HConstants.WAL_STORAGE_POLICY,
-       HConstants.DEFAULT_WAL_STORAGE_POLICY);
+     String storagePolicy =
+       conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY);
+     CommonFSUtils.setStoragePolicy(fs, this.walDir, storagePolicy);
this.walFileSuffix = (suffix == null) ? "" : URLEncoder.encode(suffix, "UTF8");
this.prefixPathStr = new Path(walDir, walFilePrefix + WAL_FILE_NAME_DELIMITER).toString();
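
Both WAL call sites above now follow the same two-step pattern; a condensed sketch (the WalDirUtil helper is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.CommonFSUtils;

final class WalDirUtil {
  // Resolve the configured policy (default: defer to HDFS), then delegate to the
  // two-arg utility, which no-ops on the "NONE" sentinel.
  static void applyWalStoragePolicy(FileSystem fs, Configuration conf, Path walDir) {
    String storagePolicy =
        conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY);
    CommonFSUtils.setStoragePolicy(fs, walDir, storagePolicy);
  }
}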

TestFSUtils.java

@@ -40,12 +40,15 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
+ import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSHedgedReadMetrics;
import org.apache.hadoop.hdfs.DFSTestUtil;
+ import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+ import org.junit.Assert;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
@@ -338,19 +341,54 @@ public class TestFSUtils {
@Test
public void testSetStoragePolicyDefault() throws Exception {
+   verifyNoHDFSApiInvocationForDefaultPolicy();
verifyFileInDirWithStoragePolicy(HConstants.DEFAULT_WAL_STORAGE_POLICY);
}
+ /**
+  * Note: currently the default policy is to defer to HDFS, and this case verifies that
+  * logic; the check will need to be removed if the default policy is ever changed.
+  */
+ private void verifyNoHDFSApiInvocationForDefaultPolicy() {
+   FileSystem testFs = new AlwaysFailSetStoragePolicyFileSystem();
+   // No exception should be thrown when setting the default storage policy, which
+   // indicates the HDFS API hasn't been called
+   try {
+     FSUtils.setStoragePolicy(testFs, new Path("non-exist"), HConstants.DEFAULT_WAL_STORAGE_POLICY,
+       true);
+   } catch (IOException e) {
+     Assert.fail("Should have bypassed the FS API when setting default storage policy");
+   }
+   // An exception should be thrown when given a non-default storage policy, which
+   // indicates the HDFS API has been called
+   try {
+     FSUtils.setStoragePolicy(testFs, new Path("non-exist"), "HOT", true);
+     Assert.fail("Should have invoked the FS API but didn't");
+   } catch (IOException e) {
+     // expected: the stub FS always throws when setStoragePolicy is invoked
+   }
+ }
+
+ class AlwaysFailSetStoragePolicyFileSystem extends DistributedFileSystem {
+   @Override
+   public void setStoragePolicy(final Path src, final String policyName)
+       throws IOException {
+     throw new IOException("The setStoragePolicy method is invoked");
+   }
+ }
/* might log a warning, but still work. (always warning on Hadoop < 2.6.0) */
@Test
public void testSetStoragePolicyValidButMaybeNotPresent() throws Exception {
verifyFileInDirWithStoragePolicy("ALL_SSD");
}
+ final String INVALID_STORAGE_POLICY = "1772";
/* should log a warning, but still work. (different warning on Hadoop < 2.6.0) */
@Test
public void testSetStoragePolicyInvalid() throws Exception {
verifyFileInDirWithStoragePolicy("1772");
verifyFileInDirWithStoragePolicy(INVALID_STORAGE_POLICY);
}
// Here instead of TestCommonFSUtils because we need a minicluster
@@ -365,12 +403,25 @@ public class TestFSUtils {
Path testDir = htu.getDataTestDirOnTestFS("testArchiveFile");
fs.mkdirs(testDir);
-     FSUtils.setStoragePolicy(fs, conf, testDir, HConstants.WAL_STORAGE_POLICY,
-       HConstants.DEFAULT_WAL_STORAGE_POLICY);
+     String storagePolicy =
+       conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY);
+     FSUtils.setStoragePolicy(fs, testDir, storagePolicy);
String file = htu.getRandomUUID().toString();
Path p = new Path(testDir, file);
WriteDataToHDFS(fs, p, 4096);
+     HFileSystem hfs = new HFileSystem(fs);
+     String policySet = hfs.getStoragePolicyName(p);
+     LOG.debug("The storage policy of path " + p + " is " + policySet);
+     if (policy.equals(HConstants.DEFER_TO_HDFS_STORAGE_POLICY)
+         || policy.equals(INVALID_STORAGE_POLICY)) {
+       String hdfsDefaultPolicy = hfs.getStoragePolicyName(hfs.getHomeDirectory());
+       LOG.debug("The default hdfs storage policy (indicated by home path: "
+         + hfs.getHomeDirectory() + ") is " + hdfsDefaultPolicy);
+       Assert.assertEquals(hdfsDefaultPolicy, policySet);
+     } else {
+       Assert.assertEquals(policy, policySet);
+     }
// will assert existence before deleting.
cleanupFile(fs, testDir);
} finally {