HBASE-22594 Clean up for backup examples (#315)
Signed-off-by: Duo Zhang <zhangduo@apache.org>
Commit: e28afec9ec
Parent: a65e72d5da
hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml
@@ -40,6 +40,7 @@
   <suppress checks="EmptyBlockCheck" files="org.apache.hadoop.hbase.http.TestServletFilter.java"/>
   <suppress checks="EmptyBlockCheck" files="org.apache.hadoop.hbase.http.TestGlobalFilter.java"/>
   <suppress checks="EmptyBlockCheck" files="org.apache.hadoop.hbase.http.TestPathFilter.java"/>
+  <suppress checks="EmptyBlockCheck" files="org.apache.hadoop.hbase.backup.example.TestZooKeeperTableArchiveClient.java"/>
   <suppress checks="EqualsHashCode" files="org.apache.hadoop.hbase.favored.StartcodeAgnosticServerName.java"/>
   <suppress checks="MethodLength" files="org.apache.hadoop.hbase.tool.coprocessor.Branch1CoprocessorMethods.java"/>
   <suppress checks="IllegalImport" message="org\.apache\.htrace\.core"/>
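Note: checkstyle's EmptyBlockCheck flags statement-less blocks, which test helpers that intentionally ignore exceptions tend to trip; that is why a per-file suppression is added above. A small, hypothetical illustration (not code from this patch):

    import java.io.Closeable;
    import java.io.IOException;

    // Hypothetical example of the pattern EmptyBlockCheck complains about:
    //
    //   try {
    //     c.close();
    //   } catch (IOException e) {
    //   }
    //
    public class EmptyBlockExample {
      static void closeQuietly(Closeable c) {
        try {
          c.close();
        } catch (IOException e) {
          // intentionally ignored: best-effort cleanup in a test helper
        }
      }

      public static void main(String[] args) {
        closeQuietly(() -> { throw new IOException("boom"); });
      }
    }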
hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java
@@ -21,12 +21,12 @@ import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -68,8 +68,8 @@ class HFileArchiveManager {
 
   /**
    * Stop retaining HFiles for the given table in the archive. HFiles will be cleaned up on the next
-   * pass of the {@link org.apache.hadoop.hbase.master.cleaner.HFileCleaner}, if the HFiles are retained by another
-   * cleaner.
+   * pass of the {@link org.apache.hadoop.hbase.master.cleaner.HFileCleaner}, if the HFiles are
+   * retained by another cleaner.
    * @param table name of the table for which to disable hfile retention.
    * @return <tt>this</tt> for chaining.
    * @throws KeeperException if we can't reach zookeeper to update the hfile cleaner.
@@ -95,17 +95,16 @@ class HFileArchiveManager {
   }
 
   /**
-   * Perform a best effort enable of hfile retention, which relies on zookeeper communicating the //
-   * * change back to the hfile cleaner.
+   * Perform a best effort enable of hfile retention, which relies on zookeeper communicating the
+   * change back to the hfile cleaner.
    * <p>
    * No attempt is made to make sure that backups are successfully created - it is inherently an
    * <b>asynchronous operation</b>.
    * @param zooKeeper watcher connection to zk cluster
   * @param table table name on which to enable archiving
-   * @throws KeeperException
+   * @throws KeeperException if a ZooKeeper operation fails
    */
-  private void enable(ZKWatcher zooKeeper, byte[] table)
-      throws KeeperException {
+  private void enable(ZKWatcher zooKeeper, byte[] table) throws KeeperException {
     LOG.debug("Ensuring archiving znode exists");
     ZKUtil.createAndFailSilent(zooKeeper, archiveZnode);
 
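Note: at the ZooKeeper level, enabling archiving amounts to creating a persistent per-table znode under the archive parent, and disabling it amounts to deleting that znode; HFileArchiveManager does this through ZKUtil.createAndFailSilent and friends. A minimal sketch using the plain ZooKeeper client instead, where the base path and helper names are assumptions, not code from this patch:

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class ArchiveZnodeSketch {
      private final ZooKeeper zk;
      // assumed layout: default "/hbase" parent plus HFILE_ARCHIVE_ZNODE_PARENT
      private final String archiveZnode = "/hbase/hfilearchive";

      public ArchiveZnodeSketch(ZooKeeper zk) {
        this.zk = zk;
      }

      /** Best-effort enable: create the per-table znode if it is not there yet. */
      public void enable(String table) throws KeeperException, InterruptedException {
        createIfMissing(archiveZnode);
        createIfMissing(archiveZnode + "/" + table);
      }

      /** Disable: delete the per-table znode so cleaners stop retaining HFiles. */
      public void disable(String table) throws KeeperException, InterruptedException {
        try {
          zk.delete(archiveZnode + "/" + table, -1);
        } catch (KeeperException.NoNodeException e) {
          // already disabled - nothing to do
        }
      }

      private void createIfMissing(String path) throws KeeperException, InterruptedException {
        try {
          zk.create(path, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        } catch (KeeperException.NodeExistsException e) {
          // fail silent, mirroring the intent of ZKUtil.createAndFailSilent
        }
      }
    }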
hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.backup.example;
 
 import java.io.IOException;
 
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -27,6 +26,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -53,14 +53,18 @@ public class LongTermArchivingHFileCleaner extends BaseHFileCleanerDelegate {
   public boolean isFileDeletable(FileStatus fStat) {
     try {
       // if it's a directory, then it can be deleted
-      if (fStat.isDirectory()) return true;
+      if (fStat.isDirectory()) {
+        return true;
+      }
 
       Path file = fStat.getPath();
       // check to see if
       FileStatus[] deleteStatus = FSUtils.listStatus(this.fs, file, null);
       // if the file doesn't exist, then it can be deleted (but should never
       // happen since deleted files shouldn't get passed in)
-      if (deleteStatus == null) return true;
+      if (deleteStatus == null) {
+        return true;
+      }
 
       // otherwise, we need to check the file's table and see if it's being archived
       Path family = file.getParent();
@@ -69,7 +73,8 @@ public class LongTermArchivingHFileCleaner extends BaseHFileCleanerDelegate {
 
       String tableName = table.getName();
       boolean ret = !archiveTracker.keepHFiles(tableName);
-      LOG.debug("Archiver says to [" + (ret ? "delete" : "keep") + "] files for table:" + tableName);
+      LOG.debug("Archiver says to [" + (ret ? "delete" : "keep") + "] files for table:" +
+          tableName);
       return ret;
     } catch (IOException e) {
       LOG.error("Failed to lookup status of:" + fStat.getPath() + ", keeping it just in case.", e);
@@ -97,13 +102,14 @@ public class LongTermArchivingHFileCleaner extends BaseHFileCleanerDelegate {
 
   @Override
   public void stop(String reason) {
-    if (this.isStopped()) return;
+    if (this.isStopped()) {
+      return;
+    }
 
     super.stop(reason);
     if (this.archiveTracker != null) {
       LOG.info("Stopping " + this.archiveTracker);
       this.archiveTracker.stop();
     }
 
   }
 
 }
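Note: the decision logic in isFileDeletable above is worth restating compactly. Directories and already-missing files are always deletable; otherwise the table name is recovered by walking three parents up the archive layout (table/region/family/file) and the file is kept while that table is being archived. A condensed, illustrative sketch, with a Predicate standing in for the archiveTracker:

    import java.io.IOException;
    import java.util.function.Predicate;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class DeletableCheckSketch {
      /** Decide whether an archived HFile may be deleted. */
      static boolean isFileDeletable(FileSystem fs, FileStatus fStat,
          Predicate<String> keepHFilesForTable) {
        try {
          // directories can always be cleaned up
          if (fStat.isDirectory()) {
            return true;
          }
          Path file = fStat.getPath();
          // a file that no longer exists has nothing to retain
          if (!fs.exists(file)) {
            return true;
          }
          // table/region/family/file -> three parents up is the table name
          String tableName = file.getParent().getParent().getParent().getName();
          return !keepHFilesForTable.test(tableName);
        } catch (IOException e) {
          // on any error, keep the file: deleting is irreversible
          return false;
        }
      }
    }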
hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java
@@ -20,12 +20,12 @@ package org.apache.hadoop.hbase.backup.example;
 import java.io.IOException;
 import java.util.List;
 
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKListener;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory;
  * archive.
  */
 @InterfaceAudience.Private
-public class TableHFileArchiveTracker extends ZKListener {
+public final class TableHFileArchiveTracker extends ZKListener {
   private static final Logger LOG = LoggerFactory.getLogger(TableHFileArchiveTracker.class);
   public static final String HFILE_ARCHIVE_ZNODE_PARENT = "hfilearchive";
   private HFileArchiveTableMonitor monitor;
@@ -67,7 +67,9 @@ public class TableHFileArchiveTracker extends ZKListener {
   @Override
   public void nodeCreated(String path) {
     // if it is the archive path
-    if (!path.startsWith(archiveHFileZNode)) return;
+    if (!path.startsWith(archiveHFileZNode)) {
+      return;
+    }
 
     LOG.debug("Archive node: " + path + " created");
     // since we are already enabled, just update a single table
@@ -75,7 +77,6 @@ public class TableHFileArchiveTracker extends ZKListener {
 
     // the top level node has come up, so read in all the tables
     if (table.length() == 0) {
-
       checkEnabledAndUpdate();
       return;
     }
@@ -90,7 +91,9 @@ public class TableHFileArchiveTracker extends ZKListener {
 
   @Override
   public void nodeChildrenChanged(String path) {
-    if (!path.startsWith(archiveHFileZNode)) return;
+    if (!path.startsWith(archiveHFileZNode)) {
+      return;
+    }
 
     LOG.debug("Archive node: " + path + " children changed.");
     // a table was added to the archive
@@ -134,7 +137,9 @@ public class TableHFileArchiveTracker extends ZKListener {
 
   @Override
   public void nodeDeleted(String path) {
-    if (!path.startsWith(archiveHFileZNode)) return;
+    if (!path.startsWith(archiveHFileZNode)) {
+      return;
+    }
 
     LOG.debug("Archive node: " + path + " deleted");
     String table = path.substring(archiveHFileZNode.length());
@@ -260,7 +265,10 @@ public class TableHFileArchiveTracker extends ZKListener {
    * Stop this tracker and the passed zookeeper
    */
   public void stop() {
-    if (this.stopped) return;
+    if (this.stopped) {
+      return;
+    }
+
     this.stopped = true;
     this.watcher.close();
   }
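Note: every callback in this tracker follows the same shape: filter out events that are not under the archive znode, then react to the specific event. A sketch of that watcher pattern using the plain ZooKeeper Watcher API rather than HBase's ZKListener; names and the prefix value are illustrative:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;

    public class ArchivePrefixWatcher implements Watcher {
      private final String archivePrefix; // e.g. "/hbase/hfilearchive" (assumed)

      public ArchivePrefixWatcher(String archivePrefix) {
        this.archivePrefix = archivePrefix;
      }

      @Override
      public void process(WatchedEvent event) {
        String path = event.getPath();
        // ignore events outside the archive subtree, like the early returns above
        if (path == null || !path.startsWith(archivePrefix)) {
          return;
        }
        switch (event.getType()) {
          case NodeCreated:
            // archiving enabled for a table (or the parent znode came up)
            break;
          case NodeDeleted:
            // archiving disabled for a table
            break;
          case NodeChildrenChanged:
            // a table was added to or removed from the archive
            break;
          default:
            break;
        }
      }
    }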
hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.backup.example;
 
 import java.io.IOException;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hbase.client.Connection;
@@ -109,7 +110,7 @@ public class ZKTableArchiveClient extends Configured {
    * @param table name of the table to check
    * @return <tt>true</tt> if it is, <tt>false</tt> otherwise
    * @throws IOException if a connection to ZooKeeper cannot be established
-   * @throws KeeperException
+   * @throws KeeperException if a ZooKeeper operation fails
    */
   public boolean getArchivingEnabled(byte[] table) throws IOException, KeeperException {
     HFileArchiveManager manager = createHFileArchiveManager();
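Note: a hedged usage sketch for ZKTableArchiveClient. Only getArchivingEnabled(byte[]) is confirmed by the diff above; the constructor shape is an assumption based on the class extending Configured and wrapping a Connection:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ArchiveClientUsage {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf)) {
          // assumed constructor: (Configuration, Connection)
          ZKTableArchiveClient client = new ZKTableArchiveClient(conf, connection);
          boolean enabled = client.getArchivingEnabled(Bytes.toBytes("my-table"));
          System.out.println("archiving enabled: " + enabled);
        }
      }
    }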
hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
@@ -284,8 +284,11 @@ public class TestZooKeeperTableArchiveClient {
     for (Path file : files) {
       String tableName = file.getParent().getParent().getParent().getName();
       // check to which table this file belongs
-      if (tableName.equals(otherTable)) initialCountForOtherTable++;
-      else if (tableName.equals(STRING_TABLE_NAME)) initialCountForPrimary++;
+      if (tableName.equals(otherTable)) {
+        initialCountForOtherTable++;
+      } else if (tableName.equals(STRING_TABLE_NAME)) {
+        initialCountForPrimary++;
+      }
     }
 
     assertTrue("Didn't archive files for:" + STRING_TABLE_NAME, initialCountForPrimary > 0);
@@ -308,11 +311,13 @@ public class TestZooKeeperTableArchiveClient {
       String tableName = file.getParent().getParent().getParent().getName();
       // ensure we don't have files from the non-archived table
       assertFalse("Have a file from the non-archived table: " + file, tableName.equals(otherTable));
-      if (tableName.equals(STRING_TABLE_NAME)) archivedForPrimary++;
+      if (tableName.equals(STRING_TABLE_NAME)) {
+        archivedForPrimary++;
+      }
     }
 
-    assertEquals("Not all archived files for the primary table were retained.", initialCountForPrimary,
-      archivedForPrimary);
+    assertEquals("Not all archived files for the primary table were retained.",
+      initialCountForPrimary, archivedForPrimary);
 
     // but we still have the archive directory
     assertTrue("Archive directory was deleted via archiver", fs.exists(archiveDir));
@@ -389,7 +394,10 @@ public class TestZooKeeperTableArchiveClient {
 
         @SuppressWarnings("unchecked")
         Iterable<FileStatus> ret = (Iterable<FileStatus>) invocation.callRealMethod();
-        if (counter[0] >= expected) finished.countDown();
+        if (counter[0] >= expected) {
+          finished.countDown();
+        }
+
         return ret;
       }
     }).when(delegateSpy).getDeletableFiles(Mockito.anyList());
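Note: the hunk above uses a spy-with-latch pattern: a Mockito doAnswer wrapper calls the real method, counts invocations, and releases a CountDownLatch once enough calls have been seen, so the test can wait for the cleaner instead of sleeping. A self-contained sketch of the same pattern; the Delegate class and names here are illustrative, not the test's real types:

    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import org.mockito.Mockito;

    public class LatchedSpySketch {
      /** Minimal stand-in for the cleaner delegate spied on in the real test. */
      public static class Delegate {
        public List<String> getDeletableFiles(List<String> files) {
          return files;
        }
      }

      public static void main(String[] args) throws Exception {
        final int expected = 3;
        final int[] counter = new int[] { 0 };
        final CountDownLatch finished = new CountDownLatch(1);

        Delegate delegateSpy = Mockito.spy(new Delegate());
        Mockito.doAnswer(invocation -> {
          counter[0]++;
          Object ret = invocation.callRealMethod();
          if (counter[0] >= expected) {
            finished.countDown(); // enough calls seen, release the waiting test
          }
          return ret;
        }).when(delegateSpy).getDeletableFiles(Mockito.anyList());

        for (int i = 0; i < expected; i++) {
          delegateSpy.getDeletableFiles(List.of("hfile-" + i));
        }
        finished.await(); // returns immediately once the latch hit zero
        System.out.println("saw " + counter[0] + " calls");
      }
    }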
@@ -414,7 +422,11 @@ public class TestZooKeeperTableArchiveClient {
     for (FileStatus file : files) {
       if (file.isDirectory()) {
         List<Path> subFiles = getAllFiles(fs, file.getPath());
-        if (subFiles != null) allFiles.addAll(subFiles);
+
+        if (subFiles != null) {
+          allFiles.addAll(subFiles);
+        }
+
         continue;
       }
       allFiles.add(file.getPath());
@@ -441,7 +453,7 @@ public class TestZooKeeperTableArchiveClient {
    * Create a new hfile in the passed region
    * @param region region to operate on
    * @param columnFamily family for which to add data
-   * @throws IOException
+   * @throws IOException if doing the put or flush fails
    */
   private void createHFileInRegion(HRegion region, byte[] columnFamily) throws IOException {
     // put one row in the region
@@ -453,7 +465,7 @@ public class TestZooKeeperTableArchiveClient {
   }
 
   /**
-   * @param cleaner
+   * @param cleaner the cleaner to use
    */
   private void runCleaner(HFileCleaner cleaner, CountDownLatch finished, Stoppable stop)
       throws InterruptedException {
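Note: the getAllFiles helper touched in the first hunk above is a simple recursive listing: directories are recursed into, regular files collected. A self-contained sketch of that helper under those assumptions:

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListFilesSketch {
      static List<Path> getAllFiles(FileSystem fs, Path dir) throws IOException {
        List<Path> allFiles = new ArrayList<>();
        FileStatus[] files;
        try {
          files = fs.listStatus(dir);
        } catch (FileNotFoundException e) {
          return allFiles; // directory vanished between checks
        }
        for (FileStatus file : files) {
          if (file.isDirectory()) {
            // recurse into subdirectories (region/family levels in the archive)
            allFiles.addAll(getAllFiles(fs, file.getPath()));
            continue;
          }
          allFiles.add(file.getPath());
        }
        return allFiles;
      }
    }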