HBASE-7423 HFileArchiver should not use the configuration from the Filesystem (Enis Soztutar)

git-svn-id: https://svn.apache.org/repos/asf/hbase/branches/hbase-7290@1445841 13f79535-47bb-0310-9956-ffa450edef68
Author: Jonathan Hsieh
Date: 2013-02-13 18:49:44 +00:00
Parent: db5100a75f
Commit: 9a6a2e87e2
7 changed files with 30 additions and 26 deletions
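
Every hunk below follows the same pattern: callers now pass their own Configuration into HFileArchiver instead of letting it fall back to fs.getConf(), which may be a different (for example, default-initialized) configuration than the one the caller is actually running with. A minimal sketch of the call-site change, assuming the usual HBase imports are in scope; the names cleanupRegion, conf, fs, and info are illustrative, not part of the patch:

    // Illustrative call-site change (sketch only):
    void cleanupRegion(Configuration conf, FileSystem fs, HRegionInfo info)
        throws IOException {
      // Before: HFileArchiver.archiveRegion(fs, info);
      //   -> internally used fs.getConf() to locate hbase.rootdir
      // After: the caller's Configuration is threaded through explicitly.
      HFileArchiver.archiveRegion(conf, fs, info);
    }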

HFileArchiver.java

@@ -62,20 +62,22 @@ public class HFileArchiver {
   /**
    * Cleans up all the files for a HRegion by archiving the HFiles to the
    * archive directory
+   * @param conf the configuration to use
    * @param fs the file system object
    * @param info HRegionInfo for region to be deleted
    * @throws IOException
    */
-  public static void archiveRegion(FileSystem fs, HRegionInfo info)
+  public static void archiveRegion(Configuration conf, FileSystem fs, HRegionInfo info)
       throws IOException {
-    Path rootDir = FSUtils.getRootDir(fs.getConf());
-    archiveRegion(fs, rootDir, HTableDescriptor.getTableDir(rootDir, info.getTableName()),
+    Path rootDir = FSUtils.getRootDir(conf);
+    archiveRegion(conf, fs, rootDir, HTableDescriptor.getTableDir(rootDir, info.getTableName()),
       HRegion.getRegionDir(rootDir, info));
   }

   /**
    * Remove an entire region from the table directory via archiving the region's hfiles.
+   * @param conf the configuration to use
    * @param fs {@link FileSystem} from which to remove the region
    * @param rootdir {@link Path} to the root directory where hbase files are stored (for building
    *          the archive path)
@@ -85,7 +87,8 @@ public class HFileArchiver {
    *           operations could not complete.
    * @throws IOException if the request cannot be completed
    */
-  public static boolean archiveRegion(FileSystem fs, Path rootdir, Path tableDir, Path regionDir)
+  public static boolean archiveRegion(Configuration conf, FileSystem fs,
+      Path rootdir, Path tableDir, Path regionDir)
       throws IOException {
     if (LOG.isDebugEnabled()) {
       LOG.debug("ARCHIVING region " + regionDir.toString());
@@ -104,7 +107,7 @@ public class HFileArchiver {
     // make sure the regiondir lives under the tabledir
     Preconditions.checkArgument(regionDir.toString().startsWith(tableDir.toString()));
-    Path regionArchiveDir = HFileArchiveUtil.getRegionArchiveDir(fs.getConf(), tableDir, regionDir);
+    Path regionArchiveDir = HFileArchiveUtil.getRegionArchiveDir(conf, tableDir, regionDir);

     LOG.debug("Have an archive directory, preparing to move files");
     FileStatusConverter getAsFile = new FileStatusConverter(fs);
@@ -180,16 +183,16 @@ public class HFileArchiver {
   /**
    * Remove the store files, either by archiving them or outright deletion
+   * @param conf {@link Configuration} to examine to determine the archive directory
    * @param fs the filesystem where the store files live
    * @param parent Parent region hosting the store files
-   * @param conf {@link Configuration} to examine to determine the archive directory
    * @param family the family hosting the store files
    * @param compactedFiles files to be disposed of. No further reading of these files should be
    *          attempted; otherwise likely to cause an {@link IOException}
    * @throws IOException if the files could not be correctly disposed.
    */
-  public static void archiveStoreFiles(FileSystem fs, HRegion parent,
-      Configuration conf, byte[] family, Collection<StoreFile> compactedFiles) throws IOException {
+  public static void archiveStoreFiles(Configuration conf, FileSystem fs, HRegion parent,
+      byte[] family, Collection<StoreFile> compactedFiles) throws IOException {
     // sometimes in testing, we don't have rss, so we need to check for that
     if (fs == null) {
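
For context on why the explicit parameter matters: FSUtils.getRootDir reads the root directory from whichever Configuration it is handed, roughly as in this simplified sketch (not the exact HBase source):

    // Simplified sketch of FSUtils.getRootDir(conf): it resolves hbase.rootdir
    // from the Configuration argument. Passing the caller's conf, rather than
    // fs.getConf(), guarantees the lookup sees the caller's actual settings.
    public static Path getRootDir(final Configuration c) throws IOException {
      return new Path(c.get("hbase.rootdir"));
    }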

CatalogJanitor.java

@@ -245,7 +245,7 @@ class CatalogJanitor extends Chore {
       }
       FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
       LOG.debug("Archiving parent region:" + parent);
-      HFileArchiver.archiveRegion(fs, parent);
+      HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, parent);
       MetaEditor.deleteRegion(this.server.getCatalogTracker(), parent);
       result = true;
     }

MasterFileSystem.java

@@ -251,13 +251,13 @@ public class MasterFileSystem {
       }
     } while (retrySplitting);
   }

   public void splitLog(final ServerName serverName) throws IOException {
     List<ServerName> serverNames = new ArrayList<ServerName>();
     serverNames.add(serverName);
     splitLog(serverNames);
   }

   public void splitLog(final List<ServerName> serverNames) throws IOException {
     long splitTime = 0, splitLogSize = 0;
     List<Path> logDirs = new ArrayList<Path>();
@@ -293,7 +293,7 @@ public class MasterFileSystem {
     // splitLogLock ensures that dead region servers' logs are processed
     // one at a time
     this.splitLogLock.lock();
     try {
       HLogSplitter splitter = HLogSplitter.createLogSplitter(
         conf, rootdir, logDir, oldLogDir, this.fs);
       try {
@@ -443,7 +443,7 @@ public class MasterFileSystem {
   public void deleteRegion(HRegionInfo region) throws IOException {
-    HFileArchiver.archiveRegion(fs, region);
+    HFileArchiver.archiveRegion(conf, fs, region);
   }

   public void deleteTable(byte[] tableName) throws IOException {
@@ -481,7 +481,7 @@ public class MasterFileSystem {
   /**
    * Create new HTableDescriptor in HDFS.
    *
    * @param htableDescriptor
    */
   public void createTableDescriptor(HTableDescriptor htableDescriptor)

HRegion.java

@@ -4310,11 +4310,11 @@ public class HRegion implements HeapSize { // , Writable{
     }

     // delete out the 'A' region
-    HFileArchiver.archiveRegion(fs, FSUtils.getRootDir(a.getBaseConf()), a.getTableDir(),
-      a.getRegionDir());
+    HFileArchiver.archiveRegion(a.getBaseConf(), fs,
+      FSUtils.getRootDir(a.getBaseConf()), a.getTableDir(), a.getRegionDir());
     // delete out the 'B' region
-    HFileArchiver.archiveRegion(fs, FSUtils.getRootDir(b.getBaseConf()), b.getTableDir(),
-      b.getRegionDir());
+    HFileArchiver.archiveRegion(a.getBaseConf(), fs,
+      FSUtils.getRootDir(b.getBaseConf()), b.getTableDir(), b.getRegionDir());

     LOG.info("merge completed. New region is " + dstRegion);

HStore.java

@@ -19,13 +19,11 @@
 package org.apache.hadoop.hbase.regionserver;

 import java.io.IOException;
-import java.lang.reflect.InvocationTargetException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.NavigableSet;
-import java.util.Random;
 import java.util.SortedSet;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CompletionService;
@@ -64,7 +62,10 @@ import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.io.hfile.InvalidHFileException;
 import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-import org.apache.hadoop.hbase.regionserver.compactions.*;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactSelection;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionPolicy;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.hbase.util.ClassSize;
@@ -1314,7 +1315,7 @@ public class HStore implements Store, StoreConfiguration {
       // let the archive util decide if we should archive or delete the files
       LOG.debug("Removing store files after compaction...");
-      HFileArchiver.archiveStoreFiles(this.fs, this.region, this.conf,
+      HFileArchiver.archiveStoreFiles(this.conf, this.fs, this.region,
         this.family.getName(), compactedFiles);
     } catch (IOException e) {

TestHFileArchiving.java

@@ -132,7 +132,7 @@ public class TestHFileArchiving {
     // now attempt to depose the region
     Path regionDir = HRegion.getRegionDir(region.getTableDir().getParent(), region.getRegionInfo());

-    HFileArchiver.archiveRegion(fs, region.getRegionInfo());
+    HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo());

     // check for the existence of the archive directory and some files in it
     Path archiveDir = HFileArchiveTestingUtil.getRegionArchiveDir(UTIL.getConfiguration(), region);
@@ -192,7 +192,7 @@ public class TestHFileArchiving {
     }

     // then archive the region
-    HFileArchiver.archiveRegion(fs, region.getRegionInfo());
+    HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo());

     // and check to make sure the region directory got deleted
     assertFalse("Region directory (" + regionDir + "), still exists.", fs.exists(regionDir));

TestCatalogJanitor.java

@@ -30,7 +30,6 @@ import java.util.Map;
 import java.util.SortedMap;
 import java.util.TreeMap;

-import com.google.protobuf.Service;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -73,6 +72,7 @@ import org.junit.experimental.categories.Category;
 import org.mockito.Mockito;

 import com.google.protobuf.RpcController;
+import com.google.protobuf.Service;
 import com.google.protobuf.ServiceException;

 @Category(SmallTests.class)
@@ -225,7 +225,7 @@ public class TestCatalogJanitor {
     @Override
     public Configuration getConfiguration() {
-      return null;
+      return mfs.conf;
     }

     @Override
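
The last hunk is the test-side consequence of the CatalogJanitor change above: the janitor now hands this.services.getConfiguration() to HFileArchiver.archiveRegion, so the mocked MasterServices can no longer return null. Spelled out with a comment (a sketch of the override after the patch; mfs is the test's mock MasterFileSystem, as in the diff):

    @Override
    public Configuration getConfiguration() {
      // Must be a real Configuration now: CatalogJanitor forwards it to
      // HFileArchiver.archiveRegion, where a null conf would fail inside
      // FSUtils.getRootDir(conf).
      return mfs.conf;
    }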