HBASE-22594 Clean up for backup examples (#315)

Signed-off-by: Duo Zhang <zhangduo@apache.org>
This commit is contained in:
Jan Hentschel 2019-07-25 07:35:44 +02:00 committed by Duo Zhang
parent ff9589c9ab
commit 62710d5835
6 changed files with 65 additions and 40 deletions

View File

@ -40,6 +40,7 @@
<suppress checks="EmptyBlockCheck" files="org.apache.hadoop.hbase.http.TestServletFilter.java"/>
<suppress checks="EmptyBlockCheck" files="org.apache.hadoop.hbase.http.TestGlobalFilter.java"/>
<suppress checks="EmptyBlockCheck" files="org.apache.hadoop.hbase.http.TestPathFilter.java"/>
<suppress checks="EmptyBlockCheck" files="org.apache.hadoop.hbase.backup.example.TestZooKeeperTableArchiveClient.java"/>
<suppress checks="EqualsHashCode" files="org.apache.hadoop.hbase.favored.StartcodeAgnosticServerName.java"/>
<suppress checks="MethodLength" files="org.apache.hadoop.hbase.tool.coprocessor.Branch1CoprocessorMethods.java"/>
<suppress checks="IllegalImport" message="org\.apache\.htrace\.core"/>

View File

@ -21,12 +21,12 @@ import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -68,8 +68,8 @@ class HFileArchiveManager {
/**
* Stop retaining HFiles for the given table in the archive. HFiles will be cleaned up on the next
* pass of the {@link org.apache.hadoop.hbase.master.cleaner.HFileCleaner}, if the HFiles are retained by another
* cleaner.
* pass of the {@link org.apache.hadoop.hbase.master.cleaner.HFileCleaner}, if the HFiles are
* retained by another cleaner.
* @param table name of the table for which to disable hfile retention.
* @return <tt>this</tt> for chaining.
* @throws KeeperException if we can't reach zookeeper to update the hfile cleaner.
@ -95,17 +95,16 @@ class HFileArchiveManager {
}
/**
* Perform a best effort enable of hfile retention, which relies on zookeeper communicating the //
* * change back to the hfile cleaner.
* Perform a best effort enable of hfile retention, which relies on zookeeper communicating the
* change back to the hfile cleaner.
* <p>
* No attempt is made to make sure that backups are successfully created - it is inherently an
* <b>asynchronous operation</b>.
* @param zooKeeper watcher connection to zk cluster
* @param table table name on which to enable archiving
* @throws KeeperException
* @throws KeeperException if a ZooKeeper operation fails
*/
private void enable(ZKWatcher zooKeeper, byte[] table)
throws KeeperException {
private void enable(ZKWatcher zooKeeper, byte[] table) throws KeeperException {
LOG.debug("Ensuring archiving znode exists");
ZKUtil.createAndFailSilent(zooKeeper, archiveZnode);

View File

@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.backup.example;
import java.io.IOException;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@ -27,6 +26,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -53,14 +53,18 @@ public class LongTermArchivingHFileCleaner extends BaseHFileCleanerDelegate {
public boolean isFileDeletable(FileStatus fStat) {
try {
// if its a directory, then it can be deleted
if (fStat.isDirectory()) return true;
if (fStat.isDirectory()) {
return true;
}
Path file = fStat.getPath();
// check to see if
FileStatus[] deleteStatus = FSUtils.listStatus(this.fs, file, null);
// if the file doesn't exist, then it can be deleted (but should never
// happen since deleted files shouldn't get passed in)
if (deleteStatus == null) return true;
if (deleteStatus == null) {
return true;
}
// otherwise, we need to check the file's table and see its being archived
Path family = file.getParent();
@ -69,7 +73,8 @@ public class LongTermArchivingHFileCleaner extends BaseHFileCleanerDelegate {
String tableName = table.getName();
boolean ret = !archiveTracker.keepHFiles(tableName);
LOG.debug("Archiver says to [" + (ret ? "delete" : "keep") + "] files for table:" + tableName);
LOG.debug("Archiver says to [" + (ret ? "delete" : "keep") + "] files for table:" +
tableName);
return ret;
} catch (IOException e) {
LOG.error("Failed to lookup status of:" + fStat.getPath() + ", keeping it just incase.", e);
@ -97,13 +102,14 @@ public class LongTermArchivingHFileCleaner extends BaseHFileCleanerDelegate {
@Override
public void stop(String reason) {
if (this.isStopped()) return;
if (this.isStopped()) {
return;
}
super.stop(reason);
if (this.archiveTracker != null) {
LOG.info("Stopping " + this.archiveTracker);
this.archiveTracker.stop();
}
}
}

View File

@ -20,12 +20,12 @@ package org.apache.hadoop.hbase.backup.example;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKListener;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory;
* archive.
*/
@InterfaceAudience.Private
public class TableHFileArchiveTracker extends ZKListener {
public final class TableHFileArchiveTracker extends ZKListener {
private static final Logger LOG = LoggerFactory.getLogger(TableHFileArchiveTracker.class);
public static final String HFILE_ARCHIVE_ZNODE_PARENT = "hfilearchive";
private HFileArchiveTableMonitor monitor;
@ -67,7 +67,9 @@ public class TableHFileArchiveTracker extends ZKListener {
@Override
public void nodeCreated(String path) {
// if it is the archive path
if (!path.startsWith(archiveHFileZNode)) return;
if (!path.startsWith(archiveHFileZNode)) {
return;
}
LOG.debug("Archive node: " + path + " created");
// since we are already enabled, just update a single table
@ -75,7 +77,6 @@ public class TableHFileArchiveTracker extends ZKListener {
// the top level node has come up, so read in all the tables
if (table.length() == 0) {
checkEnabledAndUpdate();
return;
}
@ -90,7 +91,9 @@ public class TableHFileArchiveTracker extends ZKListener {
@Override
public void nodeChildrenChanged(String path) {
if (!path.startsWith(archiveHFileZNode)) return;
if (!path.startsWith(archiveHFileZNode)) {
return;
}
LOG.debug("Archive node: " + path + " children changed.");
// a table was added to the archive
@ -134,7 +137,9 @@ public class TableHFileArchiveTracker extends ZKListener {
@Override
public void nodeDeleted(String path) {
if (!path.startsWith(archiveHFileZNode)) return;
if (!path.startsWith(archiveHFileZNode)) {
return;
}
LOG.debug("Archive node: " + path + " deleted");
String table = path.substring(archiveHFileZNode.length());
@ -260,7 +265,10 @@ public class TableHFileArchiveTracker extends ZKListener {
* Stop this tracker and the passed zookeeper
*/
public void stop() {
if (this.stopped) return;
if (this.stopped) {
return;
}
this.stopped = true;
this.watcher.close();
}

View File

@ -18,14 +18,13 @@
package org.apache.hadoop.hbase.backup.example;
import java.io.IOException;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
/**
@ -110,7 +109,7 @@ public class ZKTableArchiveClient extends Configured {
* @param table name of the table to check
* @return <tt>true</tt> if it is, <tt>false</tt> otherwise
* @throws IOException if a connection to ZooKeeper cannot be established
* @throws KeeperException
* @throws KeeperException if a ZooKeeper operation fails
*/
public boolean getArchivingEnabled(byte[] table) throws IOException, KeeperException {
HFileArchiveManager manager = createHFileArchiveManager();

View File

@ -269,8 +269,11 @@ public class TestZooKeeperTableArchiveClient {
for (Path file : files) {
String tableName = file.getParent().getParent().getParent().getName();
// check to which table this file belongs
if (tableName.equals(otherTable)) initialCountForOtherTable++;
else if (tableName.equals(STRING_TABLE_NAME)) initialCountForPrimary++;
if (tableName.equals(otherTable)) {
initialCountForOtherTable++;
} else if (tableName.equals(STRING_TABLE_NAME)) {
initialCountForPrimary++;
}
}
assertTrue("Didn't archive files for:" + STRING_TABLE_NAME, initialCountForPrimary > 0);
@ -293,11 +296,13 @@ public class TestZooKeeperTableArchiveClient {
String tableName = file.getParent().getParent().getParent().getName();
// ensure we don't have files from the non-archived table
assertFalse("Have a file from the non-archived table: " + file, tableName.equals(otherTable));
if (tableName.equals(STRING_TABLE_NAME)) archivedForPrimary++;
if (tableName.equals(STRING_TABLE_NAME)) {
archivedForPrimary++;
}
}
assertEquals("Not all archived files for the primary table were retained.", initialCountForPrimary,
archivedForPrimary);
assertEquals("Not all archived files for the primary table were retained.",
initialCountForPrimary, archivedForPrimary);
// but we still have the archive directory
assertTrue("Archive directory was deleted via archiver", fs.exists(archiveDir));
@ -374,7 +379,10 @@ public class TestZooKeeperTableArchiveClient {
@SuppressWarnings("unchecked")
Iterable<FileStatus> ret = (Iterable<FileStatus>) invocation.callRealMethod();
if (counter[0] >= expected) finished.countDown();
if (counter[0] >= expected) {
finished.countDown();
}
return ret;
}
}).when(delegateSpy).getDeletableFiles(Mockito.anyListOf(FileStatus.class));
@ -399,7 +407,11 @@ public class TestZooKeeperTableArchiveClient {
for (FileStatus file : files) {
if (file.isDirectory()) {
List<Path> subFiles = getAllFiles(fs, file.getPath());
if (subFiles != null) allFiles.addAll(subFiles);
if (subFiles != null) {
allFiles.addAll(subFiles);
}
continue;
}
allFiles.add(file.getPath());
@ -426,7 +438,7 @@ public class TestZooKeeperTableArchiveClient {
* Create a new hfile in the passed region
* @param region region to operate on
* @param columnFamily family for which to add data
* @throws IOException
* @throws IOException if doing the put or flush fails
*/
private void createHFileInRegion(HRegion region, byte[] columnFamily) throws IOException {
// put one row in the region
@ -438,7 +450,7 @@ public class TestZooKeeperTableArchiveClient {
}
/**
* @param cleaner
* @param cleaner the cleaner to use
*/
private void runCleaner(HFileCleaner cleaner, CountDownLatch finished, Stoppable stop)
throws InterruptedException {