HADOOP-8562. Merge r1477376 for HADOOP-9413, r1476877 for HDFS-4734, r1476856 for HADOOP-9524.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1486238 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Suresh Srinivas 2013-05-24 22:20:55 +00:00
parent f0829a44e4
commit baa2db4c64
16 changed files with 436 additions and 100 deletions

View File

@ -342,6 +342,12 @@ Release 2.0.5-beta - UNRELEASED
HADOOP-9490. LocalFileSystem#reportChecksumFailure not closing the HADOOP-9490. LocalFileSystem#reportChecksumFailure not closing the
checksum file handle before rename. (Ivan Mitic via suresh) checksum file handle before rename. (Ivan Mitic via suresh)
HADOOP-9524. Fix ShellCommandFencer to work on Windows.
(Arpit Agarwal via suresh)
HADOOP-9413. Add common utils for File#setReadable/Writable/Executable &
File#canRead/Write/Execute that work cross-platform. (Ivan Mitic via suresh)
HADOOP-9488. FileUtil#createJarWithClassPath only substitutes environment HADOOP-9488. FileUtil#createJarWithClassPath only substitutes environment
variables from current process environment/does not support overriding variables from current process environment/does not support overriding
when launching new process (Chris Nauroth via bikas) when launching new process (Chris Nauroth via bikas)

View File

@ -41,7 +41,6 @@ import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Shell.ShellCommandExecutor; import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
@ -145,9 +144,9 @@ public class FileUtil {
* Pure-Java implementation of "chmod +rwx f". * Pure-Java implementation of "chmod +rwx f".
*/ */
private static void grantPermissions(final File f) { private static void grantPermissions(final File f) {
f.setExecutable(true); FileUtil.setExecutable(f, true);
f.setReadable(true); FileUtil.setReadable(f, true);
f.setWritable(true); FileUtil.setWritable(f, true);
} }
private static boolean deleteImpl(final File f, final boolean doLog) { private static boolean deleteImpl(final File f, final boolean doLog) {
@ -779,6 +778,129 @@ public class FileUtil {
execCommand(file, cmd); execCommand(file, cmd);
} }
/**
 * Platform independent implementation for {@link File#setReadable(boolean)}.
 * File#setReadable does not work as expected on Windows.
 * @param f input file
 * @param readable true to grant, false to revoke owner read permission
 * @return true on success, false otherwise
 */
public static boolean setReadable(File f, boolean readable) {
  if (!Shell.WINDOWS) {
    // Non-Windows platforms honor File#setReadable directly.
    return f.setReadable(readable);
  }
  // On Windows, apply the equivalent chmod-style owner permission change.
  final String perm = readable ? "u+r" : "u-r";
  try {
    FileUtil.chmod(f.getCanonicalPath(), perm, false);
    return true;
  } catch (IOException ex) {
    // Mirror File#setReadable's contract: report failure, do not throw.
    return false;
  }
}
/**
 * Platform independent implementation for {@link File#setWritable(boolean)}.
 * File#setWritable does not work as expected on Windows.
 * @param f input file
 * @param writable true to grant, false to revoke owner write permission
 * @return true on success, false otherwise
 */
public static boolean setWritable(File f, boolean writable) {
  if (!Shell.WINDOWS) {
    // Non-Windows platforms honor File#setWritable directly.
    return f.setWritable(writable);
  }
  // On Windows, apply the equivalent chmod-style owner permission change.
  final String perm = writable ? "u+w" : "u-w";
  try {
    FileUtil.chmod(f.getCanonicalPath(), perm, false);
    return true;
  } catch (IOException ex) {
    // Mirror File#setWritable's contract: report failure, do not throw.
    return false;
  }
}
/**
 * Platform independent implementation for {@link File#setExecutable(boolean)}.
 * File#setExecutable does not work as expected on Windows.
 * Note: revoking execute permission on folders does not have the same
 * behavior on Windows as on Unix platforms. Creating, deleting or renaming
 * a file within that folder will still succeed on Windows.
 * @param f input file
 * @param executable true to grant, false to revoke owner execute permission
 * @return true on success, false otherwise
 */
public static boolean setExecutable(File f, boolean executable) {
  if (!Shell.WINDOWS) {
    // Non-Windows platforms honor File#setExecutable directly.
    return f.setExecutable(executable);
  }
  // On Windows, apply the equivalent chmod-style owner permission change.
  final String perm = executable ? "u+x" : "u-x";
  try {
    FileUtil.chmod(f.getCanonicalPath(), perm, false);
    return true;
  } catch (IOException ex) {
    // Mirror File#setExecutable's contract: report failure, do not throw.
    return false;
  }
}
/**
 * Platform independent implementation for {@link File#canRead()}.
 * @param f input file
 * @return On Unix, same as {@link File#canRead()}.
 *         On Windows, true if the process has read access on the path.
 */
public static boolean canRead(File f) {
  if (!Shell.WINDOWS) {
    return f.canRead();
  }
  try {
    // Windows: query the effective access rights through the native layer.
    return NativeIO.Windows.access(f.getCanonicalPath(),
        NativeIO.Windows.AccessRight.ACCESS_READ);
  } catch (IOException e) {
    // Any failure resolving or querying the path is reported as "no access".
    return false;
  }
}
/**
 * Platform independent implementation for {@link File#canWrite()}.
 * @param f input file
 * @return On Unix, same as {@link File#canWrite()}.
 *         On Windows, true if the process has write access on the path.
 */
public static boolean canWrite(File f) {
  if (!Shell.WINDOWS) {
    return f.canWrite();
  }
  try {
    // Windows: query the effective access rights through the native layer.
    return NativeIO.Windows.access(f.getCanonicalPath(),
        NativeIO.Windows.AccessRight.ACCESS_WRITE);
  } catch (IOException e) {
    // Any failure resolving or querying the path is reported as "no access".
    return false;
  }
}
/**
 * Platform independent implementation for {@link File#canExecute()}.
 * @param f input file
 * @return On Unix, same as {@link File#canExecute()}.
 *         On Windows, true if the process has execute access on the path.
 */
public static boolean canExecute(File f) {
  if (!Shell.WINDOWS) {
    return f.canExecute();
  }
  try {
    // Windows: query the effective access rights through the native layer.
    return NativeIO.Windows.access(f.getCanonicalPath(),
        NativeIO.Windows.AccessRight.ACCESS_EXECUTE);
  } catch (IOException e) {
    // Any failure resolving or querying the path is reported as "no access".
    return false;
  }
}
/** /**
* Set permissions to the required value. Uses the java primitives instead * Set permissions to the required value. Uses the java primitives instead
* of forking if group == other. * of forking if group == other.

View File

@ -103,7 +103,8 @@ public class LocalFileSystem extends ChecksumFileSystem {
String device = new DF(f, getConf()).getMount(); String device = new DF(f, getConf()).getMount();
File parent = f.getParentFile(); File parent = f.getParentFile();
File dir = null; File dir = null;
while (parent!=null && parent.canWrite() && parent.toString().startsWith(device)) { while (parent != null && FileUtil.canWrite(parent) &&
parent.toString().startsWith(device)) {
dir = parent; dir = parent;
parent = parent.getParentFile(); parent = parent.getParentFile();
} }

View File

@ -356,6 +356,43 @@ public class NativeIO {
/** Windows only methods used for getOwner() implementation */ /** Windows only methods used for getOwner() implementation */
private static native String getOwner(FileDescriptor fd) throws IOException; private static native String getOwner(FileDescriptor fd) throws IOException;
/** Supported list of Windows access right flags */
public static enum AccessRight {
// Each constant carries the raw Win32 file access-mask bit it maps to.
ACCESS_READ (0x0001), // FILE_READ_DATA
ACCESS_WRITE (0x0002), // FILE_WRITE_DATA
ACCESS_EXECUTE (0x0020); // FILE_EXECUTE
// Raw access-mask bit value passed down to the native access check.
private final int accessRight;
AccessRight(int access) {
accessRight = access;
}
/** @return the raw Win32 access-mask value for this right. */
public int accessRight() {
return accessRight;
}
};
/** Windows only method used to check if the current process has requested
 * access rights on the given path. */
// Note: the native implementation throws IOException when invoked on a
// non-Windows build, so callers must gate on Shell.WINDOWS first.
private static native boolean access0(String path, int requestedAccess);
/**
 * Checks whether the current process has desired access rights on
 * the given path.
 *
 * Longer term this native function can be substituted with JDK7
 * function Files#isReadable, isWritable, isExecutable.
 *
 * @param path input path
 * @param desiredAccess ACCESS_READ, ACCESS_WRITE or ACCESS_EXECUTE
 * @return true if access is allowed
 * @throws IOException I/O exception on error
 */
public static boolean access(String path, AccessRight desiredAccess)
throws IOException {
// Unwrap the enum to its raw Win32 access-mask bit for the native call.
return access0(path, desiredAccess.accessRight());
}
static { static {
if (NativeCodeLoader.isNativeCodeLoaded()) { if (NativeCodeLoader.isNativeCodeLoaded()) {
try { try {

View File

@ -23,6 +23,7 @@ import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
@ -160,12 +161,8 @@ public class DiskChecker {
+ dir.toString()); + dir.toString());
} }
if (Shell.WINDOWS) {
checkAccessByFileSystemInteraction(dir);
} else {
checkAccessByFileMethods(dir); checkAccessByFileMethods(dir);
} }
}
/** /**
* Checks that the current running process can read, write, and execute the * Checks that the current running process can read, write, and execute the
@ -177,68 +174,19 @@ public class DiskChecker {
*/ */
private static void checkAccessByFileMethods(File dir) private static void checkAccessByFileMethods(File dir)
throws DiskErrorException { throws DiskErrorException {
if (!dir.canRead()) { if (!FileUtil.canRead(dir)) {
throw new DiskErrorException("Directory is not readable: " throw new DiskErrorException("Directory is not readable: "
+ dir.toString()); + dir.toString());
} }
if (!dir.canWrite()) { if (!FileUtil.canWrite(dir)) {
throw new DiskErrorException("Directory is not writable: " throw new DiskErrorException("Directory is not writable: "
+ dir.toString()); + dir.toString());
} }
if (!dir.canExecute()) { if (!FileUtil.canExecute(dir)) {
throw new DiskErrorException("Directory is not executable: " throw new DiskErrorException("Directory is not executable: "
+ dir.toString()); + dir.toString());
} }
} }
/**
 * Checks that the current running process can read, write, and execute the
 * given directory by attempting each of those operations on the file system.
 * This method contains several workarounds to known JVM bugs that cause
 * File.canRead, File.canWrite, and File.canExecute to return incorrect results
 * on Windows with NTFS ACLs. See:
 * http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6203387
 * These bugs are supposed to be fixed in JDK7.
 *
 * @param dir File to check
 * @throws DiskErrorException if dir is not readable, not writable, or not
 * executable
 */
private static void checkAccessByFileSystemInteraction(File dir)
throws DiskErrorException {
// Make sure we can read the directory by listing it.
// File#list returns null when the directory cannot be read (or dir is
// not a directory), so null here means "no read access".
if (dir.list() == null) {
throw new DiskErrorException("Directory is not readable: "
+ dir.toString());
}
// Make sure we can write to the directory by creating a temp file in it.
// The temp file is deleted immediately; a failed delete is also treated
// as lack of write access.
try {
File tempFile = File.createTempFile("checkDirAccess", null, dir);
if (!tempFile.delete()) {
throw new DiskErrorException("Directory is not writable: "
+ dir.toString());
}
} catch (IOException e) {
throw new DiskErrorException("Directory is not writable: "
+ dir.toString(), e);
}
// Make sure the directory is executable by trying to cd into it. This
// launches a separate process. It does not change the working directory of
// the current process.
// NOTE(review): the "cmd /C cd" probe is Windows-specific; this method is
// presumably only invoked on Windows — confirm at the call site.
try {
String[] cdCmd = new String[] { "cmd", "/C", "cd",
dir.getAbsolutePath() };
Shell.execCommand(null, cdCmd, SHELL_TIMEOUT);
} catch (Shell.ExitCodeException e) {
// Non-zero exit from "cd" means the directory could not be entered.
throw new DiskErrorException("Directory is not executable: "
+ dir.toString(), e);
} catch (IOException e) {
throw new DiskErrorException("Directory is not executable: "
+ dir.toString(), e);
}
}
} }

View File

@ -812,6 +812,42 @@ cleanup:
#endif #endif
} }
/*
 * Class:     org_apache_hadoop_io_nativeio_NativeIO_Windows
 * Method:    access0
 * Signature: (Ljava/lang/String;I)Z
 *
 * Checks whether the current process holds the requested access mask on the
 * given path.  Windows-only: the Unix build throws IOException and returns a
 * dummy value (ignored because an exception is pending).
 */
JNIEXPORT jboolean JNICALL Java_org_apache_hadoop_io_nativeio_NativeIO_00024Windows_access0
(JNIEnv *env, jclass clazz, jstring jpath, jint jaccess)
{
#ifdef UNIX
  THROW(env, "java/io/IOException",
    "The function access0(path, access) is not supported on Unix");
  /* Fix: return JNI_FALSE rather than NULL. NULL is a pointer constant and
   * does not match the declared jboolean return type. */
  return JNI_FALSE;
#endif

#ifdef WINDOWS
  LPCWSTR path = NULL;
  DWORD dwRtnCode = ERROR_SUCCESS;
  ACCESS_MASK access = (ACCESS_MASK)jaccess;
  /* Default to "denied"; only CheckAccessForCurrentUser sets it to TRUE. */
  BOOL allowed = FALSE;

  path = (LPCWSTR) (*env)->GetStringChars(env, jpath, NULL);
  if (!path) goto cleanup; // exception was thrown

  dwRtnCode = CheckAccessForCurrentUser(path, access, &allowed);
  if (dwRtnCode != ERROR_SUCCESS) {
    throw_ioe(env, dwRtnCode);
    goto cleanup;
  }

cleanup:
  /* Release the pinned string chars before returning on every path. */
  if (path) (*env)->ReleaseStringChars(env, jpath, path);

  return (jboolean)allowed;
#endif
}
JNIEXPORT void JNICALL JNIEXPORT void JNICALL
Java_org_apache_hadoop_io_nativeio_NativeIO_renameTo0(JNIEnv *env, Java_org_apache_hadoop_io_nativeio_NativeIO_renameTo0(JNIEnv *env,
jclass clazz, jstring jsrc, jstring jdst) jclass clazz, jstring jsrc, jstring jdst)

View File

@ -110,6 +110,11 @@ void SystemInfoUsage();
DWORD GetFileInformationByName(__in LPCWSTR pathName, __in BOOL followLink, DWORD GetFileInformationByName(__in LPCWSTR pathName, __in BOOL followLink,
__out LPBY_HANDLE_FILE_INFORMATION lpFileInformation); __out LPBY_HANDLE_FILE_INFORMATION lpFileInformation);
DWORD CheckAccessForCurrentUser(
__in PCWSTR pathName,
__in ACCESS_MASK requestedAccess,
__out BOOL *allowed);
DWORD ConvertToLongPath(__in PCWSTR path, __deref_out PWSTR *newPath); DWORD ConvertToLongPath(__in PCWSTR path, __deref_out PWSTR *newPath);
DWORD GetSidFromAcctNameW(__in PCWSTR acctName, __out PSID* ppSid); DWORD GetSidFromAcctNameW(__in PCWSTR acctName, __out PSID* ppSid);

View File

@ -567,7 +567,7 @@ static DWORD GetEffectiveRightsForSid(PSECURITY_DESCRIPTOR psd,
PSID pSid, PSID pSid,
PACCESS_MASK pAccessRights) PACCESS_MASK pAccessRights)
{ {
AUTHZ_RESOURCE_MANAGER_HANDLE hManager; AUTHZ_RESOURCE_MANAGER_HANDLE hManager = NULL;
LUID unusedId = { 0 }; LUID unusedId = { 0 };
AUTHZ_CLIENT_CONTEXT_HANDLE hAuthzClientContext = NULL; AUTHZ_CLIENT_CONTEXT_HANDLE hAuthzClientContext = NULL;
DWORD dwRtnCode = ERROR_SUCCESS; DWORD dwRtnCode = ERROR_SUCCESS;
@ -581,6 +581,10 @@ static DWORD GetEffectiveRightsForSid(PSECURITY_DESCRIPTOR psd,
return GetLastError(); return GetLastError();
} }
// Pass AUTHZ_SKIP_TOKEN_GROUPS to the function to avoid querying user group
// information for access check. This allows us to model POSIX permissions
// on Windows, where a user can have less permissions than a group it
// belongs to.
if(!AuthzInitializeContextFromSid(AUTHZ_SKIP_TOKEN_GROUPS, if(!AuthzInitializeContextFromSid(AUTHZ_SKIP_TOKEN_GROUPS,
pSid, hManager, NULL, unusedId, NULL, &hAuthzClientContext)) pSid, hManager, NULL, unusedId, NULL, &hAuthzClientContext))
{ {
@ -594,16 +598,115 @@ static DWORD GetEffectiveRightsForSid(PSECURITY_DESCRIPTOR psd,
ret = dwRtnCode; ret = dwRtnCode;
goto GetEffectiveRightsForSidEnd; goto GetEffectiveRightsForSidEnd;
} }
if (!AuthzFreeContext(hAuthzClientContext))
{
ret = GetLastError();
goto GetEffectiveRightsForSidEnd;
}
GetEffectiveRightsForSidEnd: GetEffectiveRightsForSidEnd:
if (hManager != NULL)
{
(void)AuthzFreeResourceManager(hManager);
}
if (hAuthzClientContext != NULL)
{
(void)AuthzFreeContext(hAuthzClientContext);
}
return ret; return ret;
} }
//----------------------------------------------------------------------------
// Function: CheckAccessForCurrentUser
//
// Description:
// Checks if the current process has the requested access rights on the given
// path. Based on the following MSDN article:
// http://msdn.microsoft.com/en-us/library/windows/desktop/ff394771(v=vs.85).aspx
//
// Parameters:
// pathName: path to check (long-path prefix is added internally if needed)
// requestedAccess: Win32 access mask to test for
// allowed: receives the result; written ONLY when ERROR_SUCCESS is returned,
//          so callers should initialize it before the call
//
// Returns:
// ERROR_SUCCESS: on success
//
DWORD CheckAccessForCurrentUser(
__in PCWSTR pathName,
__in ACCESS_MASK requestedAccess,
__out BOOL *allowed)
{
DWORD dwRtnCode = ERROR_SUCCESS;
LPWSTR longPathName = NULL;
HANDLE hProcessToken = NULL;
PSECURITY_DESCRIPTOR pSd = NULL;
AUTHZ_RESOURCE_MANAGER_HANDLE hManager = NULL;
AUTHZ_CLIENT_CONTEXT_HANDLE hAuthzClientContext = NULL;
LUID Luid = {0, 0};
ACCESS_MASK currentUserAccessRights = 0;
// Prepend the long path prefix if needed
dwRtnCode = ConvertToLongPath(pathName, &longPathName);
if (dwRtnCode != ERROR_SUCCESS)
{
goto CheckAccessEnd;
}
// Get SD of the given path. OWNER and DACL security info must be
// requested, otherwise, AuthzAccessCheck fails with invalid parameter
// error.
dwRtnCode = GetNamedSecurityInfo(longPathName, SE_FILE_OBJECT,
OWNER_SECURITY_INFORMATION | GROUP_SECURITY_INFORMATION |
DACL_SECURITY_INFORMATION,
NULL, NULL, NULL, NULL, &pSd);
if (dwRtnCode != ERROR_SUCCESS)
{
goto CheckAccessEnd;
}
// Get current process token
if (!OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY, &hProcessToken))
{
dwRtnCode = GetLastError();
goto CheckAccessEnd;
}
// Create an Authz resource manager and a client context for the process
// token; these are what AuthzAccessCheck (inside GetAccess) evaluates
// the security descriptor against.
if (!AuthzInitializeResourceManager(AUTHZ_RM_FLAG_NO_AUDIT, NULL, NULL,
NULL, NULL, &hManager))
{
dwRtnCode = GetLastError();
goto CheckAccessEnd;
}
if(!AuthzInitializeContextFromToken(0, hProcessToken, hManager, NULL,
Luid, NULL, &hAuthzClientContext))
{
dwRtnCode = GetLastError();
goto CheckAccessEnd;
}
dwRtnCode = GetAccess(hAuthzClientContext, pSd, &currentUserAccessRights);
if (dwRtnCode != ERROR_SUCCESS)
{
goto CheckAccessEnd;
}
// Access is allowed only if every requested bit is granted.
*allowed = ((currentUserAccessRights & requestedAccess) == requestedAccess);
CheckAccessEnd:
// Single cleanup path: every resource is NULL-initialized above, so each
// free/close below is safe regardless of which step failed.
LocalFree(longPathName);
LocalFree(pSd);
if (hProcessToken != NULL)
{
CloseHandle(hProcessToken);
}
if (hManager != NULL)
{
(void)AuthzFreeResourceManager(hManager);
}
if (hAuthzClientContext != NULL)
{
(void)AuthzFreeContext(hAuthzClientContext);
}
return dwRtnCode;
}
//---------------------------------------------------------------------------- //----------------------------------------------------------------------------
// Function: FindFileOwnerAndPermission // Function: FindFileOwnerAndPermission
// //

View File

@ -353,15 +353,15 @@ public class TestFileUtil {
} }
private static void grantPermissions(final File f) { private static void grantPermissions(final File f) {
f.setReadable(true); FileUtil.setReadable(f, true);
f.setWritable(true); FileUtil.setWritable(f, true);
f.setExecutable(true); FileUtil.setExecutable(f, true);
} }
private static void revokePermissions(final File f) { private static void revokePermissions(final File f) {
f.setWritable(false); FileUtil.setWritable(f, false);
f.setExecutable(false); FileUtil.setExecutable(f, false);
f.setReadable(false); FileUtil.setReadable(f, false);
} }
// Validates the return value. // Validates the return value.

View File

@ -61,7 +61,7 @@ public class TestLocalFileSystem {
@After @After
public void after() throws IOException { public void after() throws IOException {
base.setWritable(true); FileUtil.setWritable(base, true);
FileUtil.fullyDelete(base); FileUtil.fullyDelete(base);
assertTrue(!base.exists()); assertTrue(!base.exists());
} }
@ -279,7 +279,7 @@ public class TestLocalFileSystem {
final File dir1 = new File(base, "dir1"); final File dir1 = new File(base, "dir1");
final File dir2 = new File(dir1, "dir2"); final File dir2 = new File(dir1, "dir2");
dir2.mkdirs(); dir2.mkdirs();
assertTrue(dir2.exists() && dir2.canWrite()); assertTrue(dir2.exists() && FileUtil.canWrite(dir2));
final String dataFileName = "corruptedData"; final String dataFileName = "corruptedData";
final Path dataPath = new Path(new File(dir2, dataFileName).toURI()); final Path dataPath = new Path(new File(dir2, dataFileName).toURI());
@ -302,7 +302,7 @@ public class TestLocalFileSystem {
// this is a hack to force the #reportChecksumFailure() method to stop // this is a hack to force the #reportChecksumFailure() method to stop
// climbing up at the 'base' directory and use 'dir1/bad_files' as the // climbing up at the 'base' directory and use 'dir1/bad_files' as the
// corrupted files storage: // corrupted files storage:
base.setWritable(false); FileUtil.setWritable(base, false);
FSDataInputStream dataFsdis = fileSys.open(dataPath); FSDataInputStream dataFsdis = fileSys.open(dataPath);
FSDataInputStream checksumFsdis = fileSys.open(checksumPath); FSDataInputStream checksumFsdis = fileSys.open(checksumPath);

View File

@ -240,6 +240,44 @@ public class TestNativeIO {
} }
/** Validate access checks on Windows */
@Test (timeout = 30000)
public void testAccess() throws Exception {
// Windows-only: NativeIO.Windows.access is not supported elsewhere.
if (!Path.WINDOWS) {
return;
}
File testFile = new File(TEST_DIR, "testfileaccess");
assertTrue(testFile.createNewFile());
// Validate ACCESS_READ
// For each right: revoke it and expect access() == false, then grant it
// back and expect access() == true.
FileUtil.setReadable(testFile, false);
assertFalse(NativeIO.Windows.access(testFile.getAbsolutePath(),
NativeIO.Windows.AccessRight.ACCESS_READ));
FileUtil.setReadable(testFile, true);
assertTrue(NativeIO.Windows.access(testFile.getAbsolutePath(),
NativeIO.Windows.AccessRight.ACCESS_READ));
// Validate ACCESS_WRITE
FileUtil.setWritable(testFile, false);
assertFalse(NativeIO.Windows.access(testFile.getAbsolutePath(),
NativeIO.Windows.AccessRight.ACCESS_WRITE));
FileUtil.setWritable(testFile, true);
assertTrue(NativeIO.Windows.access(testFile.getAbsolutePath(),
NativeIO.Windows.AccessRight.ACCESS_WRITE));
// Validate ACCESS_EXECUTE
FileUtil.setExecutable(testFile, false);
assertFalse(NativeIO.Windows.access(testFile.getAbsolutePath(),
NativeIO.Windows.AccessRight.ACCESS_EXECUTE));
FileUtil.setExecutable(testFile, true);
assertTrue(NativeIO.Windows.access(testFile.getAbsolutePath(),
NativeIO.Windows.AccessRight.ACCESS_EXECUTE));
}
@Test (timeout = 30000) @Test (timeout = 30000)
public void testOpenMissingWithoutCreate() throws Exception { public void testOpenMissingWithoutCreate() throws Exception {
if (Path.WINDOWS) { if (Path.WINDOWS) {

View File

@ -23,6 +23,7 @@ import java.io.IOException;
import java.util.Random; import java.util.Random;
import org.apache.commons.io.FileUtils; import org.apache.commons.io.FileUtils;
import org.apache.hadoop.fs.FileUtil;
/** /**
* Create a temporary directory in which sockets can be created. * Create a temporary directory in which sockets can be created.
@ -37,7 +38,7 @@ public class TemporarySocketDirectory implements Closeable {
dir = new File(tmp, "socks." + (System.currentTimeMillis() + dir = new File(tmp, "socks." + (System.currentTimeMillis() +
"." + (new Random().nextInt()))); "." + (new Random().nextInt())));
dir.mkdirs(); dir.mkdirs();
dir.setWritable(true, true); FileUtil.setWritable(dir, true);
} }
public File getDir() { public File getDir() {

View File

@ -28,6 +28,8 @@ import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo; import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean; import java.lang.management.ThreadMXBean;
import org.apache.hadoop.fs.FileUtil;
public class TestShell extends TestCase { public class TestShell extends TestCase {
private static class Command extends Shell { private static class Command extends Shell {
@ -92,7 +94,7 @@ public class TestShell extends TestCase {
PrintWriter writer = new PrintWriter(new FileOutputStream(shellFile)); PrintWriter writer = new PrintWriter(new FileOutputStream(shellFile));
writer.println(timeoutCommand); writer.println(timeoutCommand);
writer.close(); writer.close();
shellFile.setExecutable(true); FileUtil.setExecutable(shellFile, true);
Shell.ShellCommandExecutor shexc Shell.ShellCommandExecutor shexc
= new Shell.ShellCommandExecutor(new String[]{shellFile.getAbsolutePath()}, = new Shell.ShellCommandExecutor(new String[]{shellFile.getAbsolutePath()},
null, null, 100); null, null, 100);

View File

@ -400,6 +400,9 @@ Release 2.0.5-beta - UNRELEASED
HDFS-4705. Address HDFS test failures on Windows because of invalid HDFS-4705. Address HDFS test failures on Windows because of invalid
dfs.namenode.name.dir. (Ivan Mitic via suresh) dfs.namenode.name.dir. (Ivan Mitic via suresh)
HDFS-4734. HDFS Tests that use ShellCommandFencer are broken on Windows.
(Arpit Agarwal via suresh)
Release 2.0.4-alpha - 2013-04-25 Release 2.0.4-alpha - 2013-04-25
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES

View File

@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.test.MockitoUtil; import org.apache.hadoop.test.MockitoUtil;
import org.apache.hadoop.util.Shell;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
import org.mockito.ArgumentCaptor; import org.mockito.ArgumentCaptor;
@ -73,6 +74,17 @@ public class TestDFSHAAdmin {
private static String HOST_A = "1.2.3.1"; private static String HOST_A = "1.2.3.1";
private static String HOST_B = "1.2.3.2"; private static String HOST_B = "1.2.3.2";
// Fencer shell commands that always return true and false respectively
// on Unix.
private static String FENCER_TRUE_COMMAND_UNIX = "shell(true)";
private static String FENCER_FALSE_COMMAND_UNIX = "shell(false)";
// Fencer shell commands that always return true and false respectively
// on Windows. Lacking POSIX 'true' and 'false' commands we use the DOS
// commands 'rem' and 'help.exe'.
private static String FENCER_TRUE_COMMAND_WINDOWS = "shell(rem)";
private static String FENCER_FALSE_COMMAND_WINDOWS = "shell(help.exe /? >NUL)";
private HdfsConfiguration getHAConf() { private HdfsConfiguration getHAConf() {
HdfsConfiguration conf = new HdfsConfiguration(); HdfsConfiguration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMESERVICES, NSID); conf.set(DFSConfigKeys.DFS_NAMESERVICES, NSID);
@ -89,6 +101,16 @@ public class TestDFSHAAdmin {
return conf; return conf;
} }
/** Returns the fencer shell command that always succeeds on this platform. */
public static String getFencerTrueCommand() {
  if (Shell.WINDOWS) {
    return FENCER_TRUE_COMMAND_WINDOWS;
  }
  return FENCER_TRUE_COMMAND_UNIX;
}
/** Returns the fencer shell command that always fails on this platform. */
public static String getFencerFalseCommand() {
  if (Shell.WINDOWS) {
    return FENCER_FALSE_COMMAND_WINDOWS;
  }
  return FENCER_FALSE_COMMAND_UNIX;
}
@Before @Before
public void setup() throws IOException { public void setup() throws IOException {
mockProtocol = MockitoUtil.mockProtocol(HAServiceProtocol.class); mockProtocol = MockitoUtil.mockProtocol(HAServiceProtocol.class);
@ -173,7 +195,7 @@ public class TestDFSHAAdmin {
// Turn on auto-HA in the config // Turn on auto-HA in the config
HdfsConfiguration conf = getHAConf(); HdfsConfiguration conf = getHAConf();
conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true); conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)"); conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
tool.setConf(conf); tool.setConf(conf);
// Should fail without the forcemanual flag // Should fail without the forcemanual flag
@ -250,7 +272,7 @@ public class TestDFSHAAdmin {
public void testFailoverWithFencerConfigured() throws Exception { public void testFailoverWithFencerConfigured() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus(); Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
HdfsConfiguration conf = getHAConf(); HdfsConfiguration conf = getHAConf();
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)"); conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
tool.setConf(conf); tool.setConf(conf);
assertEquals(0, runTool("-failover", "nn1", "nn2")); assertEquals(0, runTool("-failover", "nn1", "nn2"));
} }
@ -259,7 +281,7 @@ public class TestDFSHAAdmin {
public void testFailoverWithFencerAndNameservice() throws Exception { public void testFailoverWithFencerAndNameservice() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus(); Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
HdfsConfiguration conf = getHAConf(); HdfsConfiguration conf = getHAConf();
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)"); conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
tool.setConf(conf); tool.setConf(conf);
assertEquals(0, runTool("-ns", "ns1", "-failover", "nn1", "nn2")); assertEquals(0, runTool("-ns", "ns1", "-failover", "nn1", "nn2"));
} }
@ -268,7 +290,7 @@ public class TestDFSHAAdmin {
public void testFailoverWithFencerConfiguredAndForce() throws Exception { public void testFailoverWithFencerConfiguredAndForce() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus(); Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
HdfsConfiguration conf = getHAConf(); HdfsConfiguration conf = getHAConf();
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)"); conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
tool.setConf(conf); tool.setConf(conf);
assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence")); assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
} }
@ -277,7 +299,7 @@ public class TestDFSHAAdmin {
public void testFailoverWithForceActive() throws Exception { public void testFailoverWithForceActive() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus(); Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
HdfsConfiguration conf = getHAConf(); HdfsConfiguration conf = getHAConf();
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)"); conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
tool.setConf(conf); tool.setConf(conf);
assertEquals(0, runTool("-failover", "nn1", "nn2", "--forceactive")); assertEquals(0, runTool("-failover", "nn1", "nn2", "--forceactive"));
} }
@ -286,7 +308,7 @@ public class TestDFSHAAdmin {
public void testFailoverWithInvalidFenceArg() throws Exception { public void testFailoverWithInvalidFenceArg() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus(); Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
HdfsConfiguration conf = getHAConf(); HdfsConfiguration conf = getHAConf();
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)"); conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
tool.setConf(conf); tool.setConf(conf);
assertEquals(-1, runTool("-failover", "nn1", "nn2", "notforcefence")); assertEquals(-1, runTool("-failover", "nn1", "nn2", "notforcefence"));
} }
@ -312,7 +334,7 @@ public class TestDFSHAAdmin {
// Turn on auto-HA in the config // Turn on auto-HA in the config
HdfsConfiguration conf = getHAConf(); HdfsConfiguration conf = getHAConf();
conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true); conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)"); conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
tool.setConf(conf); tool.setConf(conf);
assertEquals(0, runTool("-failover", "nn1", "nn2")); assertEquals(0, runTool("-failover", "nn1", "nn2"));
@ -323,7 +345,7 @@ public class TestDFSHAAdmin {
public void testForceFenceOptionListedBeforeArgs() throws Exception { public void testForceFenceOptionListedBeforeArgs() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus(); Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
HdfsConfiguration conf = getHAConf(); HdfsConfiguration conf = getHAConf();
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)"); conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
tool.setConf(conf); tool.setConf(conf);
assertEquals(0, runTool("-failover", "--forcefence", "nn1", "nn2")); assertEquals(0, runTool("-failover", "--forcefence", "nn1", "nn2"));
} }
@ -359,23 +381,23 @@ public class TestDFSHAAdmin {
HdfsConfiguration conf = getHAConf(); HdfsConfiguration conf = getHAConf();
// Set the default fencer to succeed // Set the default fencer to succeed
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)"); conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
tool.setConf(conf); tool.setConf(conf);
assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence")); assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
// Set the NN-specific fencer to fail. Should fail to fence. // Set the NN-specific fencer to fail. Should fail to fence.
conf.set(nnSpecificKey, "shell(false)"); conf.set(nnSpecificKey, getFencerFalseCommand());
tool.setConf(conf); tool.setConf(conf);
assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence")); assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
conf.unset(nnSpecificKey); conf.unset(nnSpecificKey);
// Set an NS-specific fencer to fail. Should fail. // Set an NS-specific fencer to fail. Should fail.
conf.set(nsSpecificKey, "shell(false)"); conf.set(nsSpecificKey, getFencerFalseCommand());
tool.setConf(conf); tool.setConf(conf);
assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence")); assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
// Set the NS-specific fencer to succeed. Should succeed // Set the NS-specific fencer to succeed. Should succeed
conf.set(nsSpecificKey, "shell(true)"); conf.set(nsSpecificKey, getFencerTrueCommand());
tool.setConf(conf); tool.setConf(conf);
assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence")); assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
} }

View File

@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology; import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.util.Shell;
import org.apache.log4j.Level; import org.apache.log4j.Level;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
@ -114,7 +115,8 @@ public class TestDFSHAAdminMiniCluster {
@Test @Test
public void testTryFailoverToSafeMode() throws Exception { public void testTryFailoverToSafeMode() throws Exception {
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)"); conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
TestDFSHAAdmin.getFencerTrueCommand());
tool.setConf(conf); tool.setConf(conf);
NameNodeAdapter.enterSafeMode(cluster.getNameNode(0), false); NameNodeAdapter.enterSafeMode(cluster.getNameNode(0), false);
@ -136,10 +138,17 @@ public class TestDFSHAAdminMiniCluster {
// tmp file, so we can verify that the args were substituted right // tmp file, so we can verify that the args were substituted right
File tmpFile = File.createTempFile("testFencer", ".txt"); File tmpFile = File.createTempFile("testFencer", ".txt");
tmpFile.deleteOnExit(); tmpFile.deleteOnExit();
if (Shell.WINDOWS) {
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
"shell(echo %target_nameserviceid%.%target_namenodeid% " +
"%target_port% %dfs_ha_namenode_id% > " +
tmpFile.getAbsolutePath() + ")");
} else {
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
"shell(echo -n $target_nameserviceid.$target_namenodeid " + "shell(echo -n $target_nameserviceid.$target_namenodeid " +
"$target_port $dfs_ha_namenode_id > " + "$target_port $dfs_ha_namenode_id > " +
tmpFile.getAbsolutePath() + ")"); tmpFile.getAbsolutePath() + ")");
}
// Test failover with fencer // Test failover with fencer
tool.setConf(conf); tool.setConf(conf);
@ -156,9 +165,11 @@ public class TestDFSHAAdminMiniCluster {
assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence")); assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
// The fence script should run with the configuration from the target // The fence script should run with the configuration from the target
// node, rather than the configuration from the fencing node // node, rather than the configuration from the fencing node. Strip
assertEquals("minidfs-ns.nn1 " + nn1Port + " nn1", // out any trailing spaces and CR/LFs which may be present on Windows.
Files.toString(tmpFile, Charsets.UTF_8)); String fenceCommandOutput =Files.toString(tmpFile, Charsets.UTF_8).
replaceAll(" *[\r\n]+", "");
assertEquals("minidfs-ns.nn1 " + nn1Port + " nn1", fenceCommandOutput);
tmpFile.delete(); tmpFile.delete();
// Test failover with forceactive option // Test failover with forceactive option
@ -181,7 +192,8 @@ public class TestDFSHAAdminMiniCluster {
assertFalse(tmpFile.exists()); assertFalse(tmpFile.exists());
// Test failover with force fence listed before the other arguments // Test failover with force fence listed before the other arguments
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)"); conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
TestDFSHAAdmin.getFencerTrueCommand());
tool.setConf(conf); tool.setConf(conf);
assertEquals(0, runTool("-failover", "--forcefence", "nn1", "nn2")); assertEquals(0, runTool("-failover", "--forcefence", "nn1", "nn2"));
} }