HBASE-8838 [WINDOWS] FileLink assumptions are broken under NTFS

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1503501 13f79535-47bb-0310-9956-ffa450edef68
Enis Soztutar 2013-07-15 22:25:27 +00:00
parent c89c0e25f0
commit a39393addd
2 changed files with 35 additions and 27 deletions
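
Note on the fix: FileLink keeps an input stream open on the current location and expects reads to keep working after that file is renamed into the archive. POSIX filesystems allow this (the open descriptor follows the old inode), but NTFS refuses to rename a file that is held open. A minimal standalone sketch of the differing semantics, using hypothetical scratch files and plain java.io rather than anything from this patch:

    import java.io.File;
    import java.io.FileInputStream;
    import java.io.IOException;
    import java.nio.file.Files;

    public class RenameWhileOpen {
      public static void main(String[] args) throws IOException {
        File original = new File("original.dat");   // hypothetical scratch files
        File archived = new File("archived.dat");
        Files.write(original.toPath(), new byte[] {1, 2, 3});
        try (FileInputStream in = new FileInputStream(original)) {
          in.read();
          // POSIX: the rename succeeds and 'in' keeps reading the old inode.
          // NTFS: the rename fails (sharing violation) while 'in' is open.
          boolean renamed = original.renameTo(archived);
          System.out.println("rename while open: " + renamed);
        }
        archived.delete();
        original.delete();
      }
    }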

org/apache/hadoop/hbase/util/FSUtils.java

@@ -87,6 +87,9 @@ public abstract class FSUtils {
/** Full access permissions (starting point for a umask) */
private static final String FULL_RWX_PERMISSIONS = "777";
/** Set to true on Windows platforms */
public static final boolean WINDOWS = System.getProperty("os.name").startsWith("Windows");
protected FSUtils() {
super();
}
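
The new WINDOWS flag is the standard os.name probe. A small sketch of the idiom, with a hypothetical Platform holder rather than the real FSUtils:

    // Platform probe identical in spirit to the new FSUtils.WINDOWS constant.
    public final class Platform {
      /** Set to true on Windows platforms, mirroring FSUtils.WINDOWS. */
      public static final boolean WINDOWS =
          System.getProperty("os.name").startsWith("Windows");

      private Platform() {}

      public static void main(String[] args) {
        // Callers branch on the flag instead of re-probing os.name everywhere.
        System.out.println(WINDOWS ? "NTFS rename semantics apply"
                                   : "POSIX rename semantics apply");
      }
    }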
@@ -95,7 +98,7 @@ public abstract class FSUtils {
* Compare of path component. Does not consider schema; i.e. if schemas different but <code>path
* <code> starts with <code>rootPath<code>, then the function returns true
* @param rootPath
* @param path
* @return True if <code>path</code> starts with <code>rootPath</code>
*/
public static boolean isStartingWithPath(final Path rootPath, final String path) {
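
Per the javadoc above, the check is purely on the path component and ignores the scheme. A hedged usage sketch (assumes the HBase classpath; the paths are made up):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.util.FSUtils;

    public class PrefixCheck {
      public static void main(String[] args) {
        Path root = new Path("hdfs://namenode/hbase");
        // True: the path component starts with the root's path component,
        // even though the schemes (file vs. hdfs) differ.
        System.out.println(FSUtils.isStartingWithPath(root, "file:///hbase/data/t1"));
        // False: different path component.
        System.out.println(FSUtils.isStartingWithPath(root, "hdfs://namenode/tmp/t1"));
      }
    }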
@@ -404,7 +407,7 @@ public abstract class FSUtils {
/**
* We use reflection because {@link DistributedFileSystem#setSafeMode(
* FSConstants.SafeModeAction action, boolean isChecked)} is not in hadoop 1.1
*
* @param dfs
* @return whether we're in safe mode
* @throws IOException
@@ -418,14 +421,14 @@ public abstract class FSUtils {
org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET, true);
} catch (Exception e) {
if (e instanceof IOException) throw (IOException) e;
// Check whether dfs is on safemode.
inSafeMode = dfs.setSafeMode(
org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET);
}
return inSafeMode;
}
/**
* Check whether dfs is in safemode.
* @param conf
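
For context, isInSafeMode prefers the two-argument setSafeMode(action, isChecked) via reflection and falls back to the one-argument form that Hadoop 1.1 provides. A generic sketch of that try-the-newer-overload pattern, with hypothetical stand-in classes:

    import java.lang.reflect.Method;

    public class ReflectiveFallback {
      /** Hypothetical stand-in for DistributedFileSystem. */
      public static class Fs {
        public boolean check(String action) { return false; }       // old API
        public boolean check(String action, boolean isChecked) {    // new API
          return false;
        }
      }

      public static boolean safeCheck(Fs fs, String action) throws Exception {
        try {
          // Prefer the newer overload if it exists on this classpath.
          Method m = fs.getClass().getMethod("check", String.class, boolean.class);
          return (Boolean) m.invoke(fs, action, true);
        } catch (NoSuchMethodException e) {
          // Older release: fall back to the one-argument form.
          return fs.check(action);
        }
      }

      public static void main(String[] args) throws Exception {
        System.out.println(safeCheck(new Fs(), "SAFEMODE_GET"));
      }
    }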
@@ -458,7 +461,7 @@ public abstract class FSUtils {
Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
FileStatus[] status = null;
try {
// hadoop 2.0 throws FNFE if directory does not exist.
// hadoop 1.0 returns null if directory does not exist.
status = fs.listStatus(versionFile);
} catch (FileNotFoundException fnfe) {
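
The comments above record a real Hadoop portability wrinkle: listStatus on a missing path throws FileNotFoundException on 2.x but returns null on 1.x. A sketch of the defensive wrapper this implies (hypothetical helper, not part of the patch):

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class SafeListStatus {
      /** Returns an empty array instead of null/FNFE when 'dir' is missing. */
      public static FileStatus[] listStatusSafe(FileSystem fs, Path dir)
          throws IOException {
        FileStatus[] status;
        try {
          status = fs.listStatus(dir);   // Hadoop 2.x throws FNFE here
        } catch (FileNotFoundException fnfe) {
          status = null;
        }
        // Hadoop 1.x returns null for a missing directory.
        return (status == null) ? new FileStatus[0] : status;
      }
    }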
@@ -1446,7 +1449,7 @@ public abstract class FSUtils {
* @return Map keyed by StoreFile name with a value of the full Path.
* @throws IOException When scanning the directory fails.
*/
public static Map<String, Path> getTableStoreFilePathMap(Map<String, Path> map,
final FileSystem fs, final Path hbaseRootDir, byte[] tablename)
throws IOException {
if (map == null) {
@@ -1456,7 +1459,7 @@ public abstract class FSUtils {
// only include the directory paths to tables
Path tableDir = new Path(hbaseRootDir, Bytes.toString(tablename));
// Inside a table, there are compaction.dir directories to skip. Otherwise, all else
// should be regions.
PathFilter df = new BlackListDirFilter(fs, HConstants.HBASE_NON_TABLE_DIRS);
FileStatus[] regionDirs = fs.listStatus(tableDir);
for (FileStatus regionDir : regionDirs) {
@@ -1480,7 +1483,7 @@ public abstract class FSUtils {
return map;
}
/**
* Runs through the HBase rootdir and creates a reverse lookup map for
* table StoreFile names to the full Path.
@@ -1501,7 +1504,7 @@ public abstract class FSUtils {
// if this method looks similar to 'getTableFragmentation' that is because
// it was borrowed from it.
// only include the directory paths to tables
PathFilter df = new BlackListDirFilter(fs, HConstants.HBASE_NON_TABLE_DIRS);
FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df);
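
Both getTableStoreFilePathMap variants build a reverse index from StoreFile name to full Path. A hedged usage sketch of the per-table overload whose signature appears in the hunk above (root dir and table name are made up):

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.FSUtils;

    public class StoreFileIndex {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path rootDir = new Path("/hbase");          // hypothetical root dir
        Map<String, Path> map = new HashMap<String, Path>();
        // Accumulates StoreFile-name -> full-Path entries for one table.
        FSUtils.getTableStoreFilePathMap(map, fs, rootDir, Bytes.toBytes("t1"));
        for (Map.Entry<String, Path> e : map.entrySet()) {
          System.out.println(e.getKey() + " -> " + e.getValue());
        }
      }
    }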

org/apache/hadoop/hbase/io/TestFileLink.java

@@ -22,27 +22,23 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import org.junit.Test;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import junit.framework.TestCase;
import org.junit.experimental.categories.Category;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.io.FileLink;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
* Test that FileLink switches between alternate locations
* when the current location moves or gets deleted.
@@ -108,12 +104,21 @@ public class TestFileLink {
dataVerify(data, n, (byte)2);
size += n;
if (FSUtils.WINDOWS) {
in.close();
}
// Move origin to archive
assertFalse(fs.exists(archivedPath));
fs.rename(originalPath, archivedPath);
assertFalse(fs.exists(originalPath));
assertTrue(fs.exists(archivedPath));
if (FSUtils.WINDOWS) {
in = link.open(fs); // re-read from beginning
in.read(data);
}
// Try to read to the end
while ((n = in.read(data)) > 0) {
dataVerify(data, n, (byte)2);
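
Taken together, the added guards close the stream before the rename (NTFS will not move an open file) and re-open the link afterwards, re-reading the first chunk so the byte count stays consistent. A condensed sketch of that shape, with hypothetical names outside the real test:

    import java.io.IOException;
    import java.io.InputStream;

    public class ReopenOnWindows {
      private static final boolean WINDOWS =
          System.getProperty("os.name").startsWith("Windows");

      /** Hypothetical link abstraction mirroring FileLink.open(fs). */
      interface Link { InputStream open() throws IOException; }

      static long readAcrossRename(Link link, Runnable rename) throws IOException {
        long size = 0;
        InputStream in = link.open();
        byte[] buf = new byte[8192];
        int n = in.read(buf);
        if (n > 0) size += n;
        if (WINDOWS) in.close();     // NTFS cannot rename a file held open
        rename.run();                // move original -> archive
        if (WINDOWS) {
          in = link.open();          // re-open; the link now resolves to the archive
          in.read(buf);              // skip the chunk already counted above
        }
        while ((n = in.read(buf)) > 0) size += n;
        in.close();
        return size;
      }
    }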