diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
index bfd15ee231d..eb8d5517302 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
@@ -62,20 +62,22 @@ public class HFileArchiver {
   /**
    * Cleans up all the files for a HRegion by archiving the HFiles to the
    * archive directory
+   * @param conf the configuration to use
    * @param fs the file system object
    * @param info HRegionInfo for region to be deleted
    * @throws IOException
    */
-  public static void archiveRegion(FileSystem fs, HRegionInfo info)
+  public static void archiveRegion(Configuration conf, FileSystem fs, HRegionInfo info)
   throws IOException {
-    Path rootDir = FSUtils.getRootDir(fs.getConf());
-    archiveRegion(fs, rootDir, HTableDescriptor.getTableDir(rootDir, info.getTableName()),
+    Path rootDir = FSUtils.getRootDir(conf);
+    archiveRegion(conf, fs, rootDir, HTableDescriptor.getTableDir(rootDir, info.getTableName()),
       HRegion.getRegionDir(rootDir, info));
   }
 
   /**
    * Remove an entire region from the table directory via archiving the region's hfiles.
+   * @param conf the configuration to use
    * @param fs {@link FileSystem} from which to remove the region
    * @param rootdir {@link Path} to the root directory where hbase files are stored (for building
    *          the archive path)
@@ -85,7 +87,8 @@ public class HFileArchiver {
    *           operations could not complete.
    * @throws IOException if the request cannot be completed
    */
-  public static boolean archiveRegion(FileSystem fs, Path rootdir, Path tableDir, Path regionDir)
+  public static boolean archiveRegion(Configuration conf, FileSystem fs,
+      Path rootdir, Path tableDir, Path regionDir)
       throws IOException {
     if (LOG.isDebugEnabled()) {
       LOG.debug("ARCHIVING region " + regionDir.toString());
@@ -104,7 +107,7 @@ public class HFileArchiver {
 
     // make sure the regiondir lives under the tabledir
     Preconditions.checkArgument(regionDir.toString().startsWith(tableDir.toString()));
-    Path regionArchiveDir = HFileArchiveUtil.getRegionArchiveDir(fs.getConf(), tableDir, regionDir);
+    Path regionArchiveDir = HFileArchiveUtil.getRegionArchiveDir(conf, tableDir, regionDir);
 
     LOG.debug("Have an archive directory, preparing to move files");
     FileStatusConverter getAsFile = new FileStatusConverter(fs);
@@ -180,16 +183,16 @@ public class HFileArchiver {
 
   /**
    * Remove the store files, either by archiving them or outright deletion
+   * @param conf {@link Configuration} to examine to determine the archive directory
    * @param fs the filesystem where the store files live
    * @param parent Parent region hosting the store files
-   * @param conf {@link Configuration} to examine to determine the archive directory
    * @param family the family hosting the store files
    * @param compactedFiles files to be disposed of. No further reading of these files should be
    *          attempted; otherwise likely to cause an {@link IOException}
    * @throws IOException if the files could not be correctly disposed.
   */
-  public static void archiveStoreFiles(FileSystem fs, HRegion parent,
-      Configuration conf, byte[] family, Collection<StoreFile> compactedFiles) throws IOException {
+  public static void archiveStoreFiles(Configuration conf, FileSystem fs, HRegion parent,
+      byte[] family, Collection<StoreFile> compactedFiles) throws IOException {
 
     // sometimes in testing, we don't have rss, so we need to check for that
     if (fs == null) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index c6cadf1062d..176d9f15d20 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -245,7 +245,7 @@ class CatalogJanitor extends Chore {
       }
       FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
       LOG.debug("Archiving parent region:" + parent);
-      HFileArchiver.archiveRegion(fs, parent);
+      HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, parent);
       MetaEditor.deleteRegion(this.server.getCatalogTracker(), parent);
       result = true;
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 38a12e1792f..904912fa28b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -251,13 +251,13 @@ public class MasterFileSystem {
       }
     } while (retrySplitting);
   }
-  
+
   public void splitLog(final ServerName serverName) throws IOException {
     List<ServerName> serverNames = new ArrayList<ServerName>();
     serverNames.add(serverName);
     splitLog(serverNames);
   }
-  
+
   public void splitLog(final List<ServerName> serverNames) throws IOException {
     long splitTime = 0, splitLogSize = 0;
     List<Path> logDirs = new ArrayList<Path>();
@@ -293,7 +293,7 @@ public class MasterFileSystem {
     // splitLogLock ensures that dead region servers' logs are processed
     // one at a time
     this.splitLogLock.lock();
-    try {  
+    try {
       HLogSplitter splitter = HLogSplitter.createLogSplitter(
         conf, rootdir, logDir, oldLogDir, this.fs);
       try {
@@ -443,7 +443,7 @@ public class MasterFileSystem {
 
   public void deleteRegion(HRegionInfo region) throws IOException {
-    HFileArchiver.archiveRegion(fs, region);
+    HFileArchiver.archiveRegion(conf, fs, region);
   }
 
   public void deleteTable(byte[] tableName) throws IOException {
@@ -481,7 +481,7 @@ public class MasterFileSystem {
 
   /**
    * Create new HTableDescriptor in HDFS.
-   * 
+   *
    * @param htableDescriptor
    */
   public void createTableDescriptor(HTableDescriptor htableDescriptor)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index b1be10ddda6..53f1d676f66 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -4310,11 +4310,11 @@ public class HRegion implements HeapSize { // , Writable{
     }
 
     // delete out the 'A' region
-    HFileArchiver.archiveRegion(fs, FSUtils.getRootDir(a.getBaseConf()), a.getTableDir(),
-      a.getRegionDir());
+    HFileArchiver.archiveRegion(a.getBaseConf(), fs,
+      FSUtils.getRootDir(a.getBaseConf()), a.getTableDir(), a.getRegionDir());
     // delete out the 'B' region
-    HFileArchiver.archiveRegion(fs, FSUtils.getRootDir(b.getBaseConf()), b.getTableDir(),
-      b.getRegionDir());
+    HFileArchiver.archiveRegion(b.getBaseConf(), fs,
+      FSUtils.getRootDir(b.getBaseConf()), b.getTableDir(), b.getRegionDir());
 
     LOG.info("merge completed. New region is " + dstRegion);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 8be5f973476..528ac02b068 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -19,13 +19,11 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
-import java.lang.reflect.InvocationTargetException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.NavigableSet;
-import java.util.Random;
 import java.util.SortedSet;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CompletionService;
@@ -64,7 +62,10 @@ import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.io.hfile.InvalidHFileException;
 import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-import org.apache.hadoop.hbase.regionserver.compactions.*;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactSelection;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionPolicy;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.hbase.util.ClassSize;
@@ -1314,7 +1315,7 @@ public class HStore implements Store, StoreConfiguration {
 
       // let the archive util decide if we should archive or delete the files
       LOG.debug("Removing store files after compaction...");
-      HFileArchiver.archiveStoreFiles(this.fs, this.region, this.conf,
+      HFileArchiver.archiveStoreFiles(this.conf, this.fs, this.region,
         this.family.getName(), compactedFiles);
 
     } catch (IOException e) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
index e1dd0c5ddb4..3bb7c82f0d6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
@@ -132,7 +132,7 @@ public class TestHFileArchiving {
     // now attempt to depose the region
     Path regionDir = HRegion.getRegionDir(region.getTableDir().getParent(), region.getRegionInfo());
-    HFileArchiver.archiveRegion(fs, region.getRegionInfo());
+    HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo());
 
     // check for the existence of the archive directory and some files in it
     Path archiveDir = HFileArchiveTestingUtil.getRegionArchiveDir(UTIL.getConfiguration(), region);
@@ -192,7 +192,7 @@ public class TestHFileArchiving {
     }
 
     // then archive the region
-    HFileArchiver.archiveRegion(fs, region.getRegionInfo());
+    HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo());
 
     // and check to make sure the region directory got deleted
     assertFalse("Region directory (" + regionDir + "), still exists.", fs.exists(regionDir));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index a5dfdff45c4..96291964ed9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -30,7 +30,6 @@ import java.util.Map;
 import java.util.SortedMap;
 import java.util.TreeMap;
 
-import com.google.protobuf.Service;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -73,6 +72,7 @@ import org.junit.experimental.categories.Category;
 import org.mockito.Mockito;
 
 import com.google.protobuf.RpcController;
+import com.google.protobuf.Service;
 import com.google.protobuf.ServiceException;
 
 @Category(SmallTests.class)
@@ -225,7 +225,7 @@ public class TestCatalogJanitor {
 
     @Override
     public Configuration getConfiguration() {
-      return null;
+      return mfs.conf;
    }
 
     @Override
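[Reviewer note, not part of the patch] A minimal caller-side sketch of the reordered API after this change, with the Configuration passed explicitly as the first argument instead of being read back from fs.getConf(). The archiveRegion(conf, fs, info) signature comes from the patch; the wrapper class below and the way conf and fs are obtained are illustrative assumptions only.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.util.FSUtils;

/** Illustrative caller only; not part of this patch. */
public class ArchiveRegionExample {
  public static void archive(HRegionInfo info) throws IOException {
    // Callers now supply the Configuration explicitly; HFileArchiver no longer
    // derives it from fs.getConf() when resolving the archive directory.
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FSUtils.getRootDir(conf).getFileSystem(conf);
    HFileArchiver.archiveRegion(conf, fs, info);
  }
}

As the updated javadoc states, the archive directory is determined from the Configuration supplied by the caller, so call sites such as CatalogJanitor and MasterFileSystem pass their own configuration through rather than relying on whatever the FileSystem happens to carry.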