HADOOP-8562. Merge r1477376 for HADOOP-9413, r1476877 for HDFS-4734, r1476856 for HADOOP-9524.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1486238 13f79535-47bb-0310-9956-ffa450edef68
parent f0829a44e4
commit baa2db4c64
@@ -342,6 +342,12 @@ Release 2.0.5-beta - UNRELEASED
     HADOOP-9490. LocalFileSystem#reportChecksumFailure not closing the
     checksum file handle before rename. (Ivan Mitic via suresh)
 
+    HADOOP-9524. Fix ShellCommandFencer to work on Windows.
+    (Arpit Agarwal via suresh)
+
+    HADOOP-9413. Add common utils for File#setReadable/Writable/Executable &
+    File#canRead/Write/Execute that work cross-platform. (Ivan Mitic via suresh)
+
     HADOOP-9488. FileUtil#createJarWithClassPath only substitutes environment
     variables from current process environment/does not support overriding
     when launching new process (Chris Nauroth via bikas)

@@ -41,7 +41,6 @@ import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 

@@ -145,9 +144,9 @@ public class FileUtil {
    * Pure-Java implementation of "chmod +rwx f".
    */
   private static void grantPermissions(final File f) {
-    f.setExecutable(true);
-    f.setReadable(true);
-    f.setWritable(true);
+    FileUtil.setExecutable(f, true);
+    FileUtil.setReadable(f, true);
+    FileUtil.setWritable(f, true);
   }
 
   private static boolean deleteImpl(final File f, final boolean doLog) {

@@ -779,6 +778,129 @@ public class FileUtil {
     execCommand(file, cmd);
   }
 
+  /**
+   * Platform independent implementation for {@link File#setReadable(boolean)}
+   * File#setReadable does not work as expected on Windows.
+   * @param f input file
+   * @param readable true to grant read access, false to revoke it
+   * @return true on success, false otherwise
+   */
+  public static boolean setReadable(File f, boolean readable) {
+    if (Shell.WINDOWS) {
+      try {
+        String permission = readable ? "u+r" : "u-r";
+        FileUtil.chmod(f.getCanonicalPath(), permission, false);
+        return true;
+      } catch (IOException ex) {
+        return false;
+      }
+    } else {
+      return f.setReadable(readable);
+    }
+  }
+
+  /**
+   * Platform independent implementation for {@link File#setWritable(boolean)}
+   * File#setWritable does not work as expected on Windows.
+   * @param f input file
+   * @param writable true to grant write access, false to revoke it
+   * @return true on success, false otherwise
+   */
+  public static boolean setWritable(File f, boolean writable) {
+    if (Shell.WINDOWS) {
+      try {
+        String permission = writable ? "u+w" : "u-w";
+        FileUtil.chmod(f.getCanonicalPath(), permission, false);
+        return true;
+      } catch (IOException ex) {
+        return false;
+      }
+    } else {
+      return f.setWritable(writable);
+    }
+  }
+
+  /**
+   * Platform independent implementation for {@link File#setExecutable(boolean)}
+   * File#setExecutable does not work as expected on Windows.
+   * Note: revoking execute permission on folders does not have the same
+   * behavior on Windows as on Unix platforms. Creating, deleting or renaming
+   * a file within that folder will still succeed on Windows.
+   * @param f input file
+   * @param executable true to grant execute access, false to revoke it
+   * @return true on success, false otherwise
+   */
+  public static boolean setExecutable(File f, boolean executable) {
+    if (Shell.WINDOWS) {
+      try {
+        String permission = executable ? "u+x" : "u-x";
+        FileUtil.chmod(f.getCanonicalPath(), permission, false);
+        return true;
+      } catch (IOException ex) {
+        return false;
+      }
+    } else {
+      return f.setExecutable(executable);
+    }
+  }
+
+  /**
+   * Platform independent implementation for {@link File#canRead()}
+   * @param f input file
+   * @return On Unix, same as {@link File#canRead()}
+   *         On Windows, true if process has read access on the path
+   */
+  public static boolean canRead(File f) {
+    if (Shell.WINDOWS) {
+      try {
+        return NativeIO.Windows.access(f.getCanonicalPath(),
+            NativeIO.Windows.AccessRight.ACCESS_READ);
+      } catch (IOException e) {
+        return false;
+      }
+    } else {
+      return f.canRead();
+    }
+  }
+
+  /**
+   * Platform independent implementation for {@link File#canWrite()}
+   * @param f input file
+   * @return On Unix, same as {@link File#canWrite()}
+   *         On Windows, true if process has write access on the path
+   */
+  public static boolean canWrite(File f) {
+    if (Shell.WINDOWS) {
+      try {
+        return NativeIO.Windows.access(f.getCanonicalPath(),
+            NativeIO.Windows.AccessRight.ACCESS_WRITE);
+      } catch (IOException e) {
+        return false;
+      }
+    } else {
+      return f.canWrite();
+    }
+  }
+
+  /**
+   * Platform independent implementation for {@link File#canExecute()}
+   * @param f input file
+   * @return On Unix, same as {@link File#canExecute()}
+   *         On Windows, true if process has execute access on the path
+   */
+  public static boolean canExecute(File f) {
+    if (Shell.WINDOWS) {
+      try {
+        return NativeIO.Windows.access(f.getCanonicalPath(),
+            NativeIO.Windows.AccessRight.ACCESS_EXECUTE);
+      } catch (IOException e) {
+        return false;
+      }
+    } else {
+      return f.canExecute();
+    }
+  }
+
   /**
    * Set permissions to the required value. Uses the java primitives instead
    * of forking if group == other.

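A minimal usage sketch of the new helpers (illustrative only; the file path is a placeholder). On Windows the set* methods shell out through FileUtil.chmod; elsewhere they delegate to java.io.File:

    import java.io.File;
    import org.apache.hadoop.fs.FileUtil;

    public class FileUtilPermissionsExample {
      public static void main(String[] args) throws Exception {
        File f = new File("example.txt");           // placeholder path
        f.createNewFile();
        FileUtil.setWritable(f, false);             // revoke owner write
        System.out.println(FileUtil.canWrite(f));   // false on Unix and Windows
        FileUtil.setWritable(f, true);              // restore it
      }
    }
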
@@ -103,7 +103,8 @@ public class LocalFileSystem extends ChecksumFileSystem {
     String device = new DF(f, getConf()).getMount();
     File parent = f.getParentFile();
     File dir = null;
-    while (parent!=null && parent.canWrite() && parent.toString().startsWith(device)) {
+    while (parent != null && FileUtil.canWrite(parent) &&
+        parent.toString().startsWith(device)) {
       dir = parent;
       parent = parent.getParentFile();
     }

@@ -356,6 +356,43 @@ public class NativeIO {
     /** Windows only methods used for getOwner() implementation */
     private static native String getOwner(FileDescriptor fd) throws IOException;
 
+    /** Supported list of Windows access right flags */
+    public static enum AccessRight {
+      ACCESS_READ (0x0001),      // FILE_READ_DATA
+      ACCESS_WRITE (0x0002),     // FILE_WRITE_DATA
+      ACCESS_EXECUTE (0x0020);   // FILE_EXECUTE
+
+      private final int accessRight;
+      AccessRight(int access) {
+        accessRight = access;
+      }
+
+      public int accessRight() {
+        return accessRight;
+      }
+    };
+
+    /** Windows only method used to check if the current process has requested
+     *  access rights on the given path. */
+    private static native boolean access0(String path, int requestedAccess);
+
+    /**
+     * Checks whether the current process has desired access rights on
+     * the given path.
+     *
+     * Longer term this native function can be substituted with JDK7
+     * function Files#isReadable, isWritable, isExecutable.
+     *
+     * @param path input path
+     * @param desiredAccess ACCESS_READ, ACCESS_WRITE or ACCESS_EXECUTE
+     * @return true if access is allowed
+     * @throws IOException I/O exception on error
+     */
+    public static boolean access(String path, AccessRight desiredAccess)
+        throws IOException {
+      return access0(path, desiredAccess.accessRight());
+    }
+
     static {
       if (NativeCodeLoader.isNativeCodeLoaded()) {
         try {

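The javadoc above anticipates replacing access0 with the JDK7 java.nio.file API; a sketch of that alternative (not part of this change; the path is a placeholder):

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class Jdk7AccessCheck {
      public static void main(String[] args) {
        Path p = Paths.get("example.txt");  // placeholder path
        // Files consults effective access rights, unlike java.io.File
        // on Windows before JDK7.
        System.out.println("read:    " + Files.isReadable(p));
        System.out.println("write:   " + Files.isWritable(p));
        System.out.println("execute: " + Files.isExecutable(p));
      }
    }
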
@@ -23,6 +23,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;

@@ -160,11 +161,7 @@ public class DiskChecker {
           + dir.toString());
     }
 
-    if (Shell.WINDOWS) {
-      checkAccessByFileSystemInteraction(dir);
-    } else {
-      checkAccessByFileMethods(dir);
-    }
+    checkAccessByFileMethods(dir);
   }
 
   /**

@@ -177,68 +174,19 @@ public class DiskChecker {
    */
   private static void checkAccessByFileMethods(File dir)
       throws DiskErrorException {
-    if (!dir.canRead()) {
+    if (!FileUtil.canRead(dir)) {
       throw new DiskErrorException("Directory is not readable: "
                                    + dir.toString());
     }
 
-    if (!dir.canWrite()) {
+    if (!FileUtil.canWrite(dir)) {
       throw new DiskErrorException("Directory is not writable: "
                                    + dir.toString());
     }
 
-    if (!dir.canExecute()) {
+    if (!FileUtil.canExecute(dir)) {
       throw new DiskErrorException("Directory is not executable: "
                                    + dir.toString());
     }
   }
-
-  /**
-   * Checks that the current running process can read, write, and execute the
-   * given directory by attempting each of those operations on the file system.
-   * This method contains several workarounds to known JVM bugs that cause
-   * File.canRead, File.canWrite, and File.canExecute to return incorrect results
-   * on Windows with NTFS ACLs. See:
-   * http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6203387
-   * These bugs are supposed to be fixed in JDK7.
-   *
-   * @param dir File to check
-   * @throws DiskErrorException if dir is not readable, not writable, or not
-   *   executable
-   */
-  private static void checkAccessByFileSystemInteraction(File dir)
-      throws DiskErrorException {
-    // Make sure we can read the directory by listing it.
-    if (dir.list() == null) {
-      throw new DiskErrorException("Directory is not readable: "
-                                   + dir.toString());
-    }
-
-    // Make sure we can write to the directory by creating a temp file in it.
-    try {
-      File tempFile = File.createTempFile("checkDirAccess", null, dir);
-      if (!tempFile.delete()) {
-        throw new DiskErrorException("Directory is not writable: "
-                                     + dir.toString());
-      }
-    } catch (IOException e) {
-      throw new DiskErrorException("Directory is not writable: "
-                                   + dir.toString(), e);
-    }
-
-    // Make sure the directory is executable by trying to cd into it. This
-    // launches a separate process. It does not change the working directory of
-    // the current process.
-    try {
-      String[] cdCmd = new String[] { "cmd", "/C", "cd",
-          dir.getAbsolutePath() };
-      Shell.execCommand(null, cdCmd, SHELL_TIMEOUT);
-    } catch (Shell.ExitCodeException e) {
-      throw new DiskErrorException("Directory is not executable: "
-                                   + dir.toString(), e);
-    } catch (IOException e) {
-      throw new DiskErrorException("Directory is not executable: "
-                                   + dir.toString(), e);
-    }
-  }
 }

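With the Windows-specific branch gone, DiskChecker runs the same FileUtil-backed checks on every platform and no longer spawns a cmd.exe process per directory. A small sketch of the public entry point (directory name is a placeholder):

    import java.io.File;
    import org.apache.hadoop.util.DiskChecker;
    import org.apache.hadoop.util.DiskChecker.DiskErrorException;

    public class DiskCheckerExample {
      public static void main(String[] args) {
        try {
          // Verifies the dir exists (creating it if needed) and is
          // readable, writable, and executable for this process.
          DiskChecker.checkDir(new File("data"));  // placeholder dir
          System.out.println("directory is usable");
        } catch (DiskErrorException e) {
          System.err.println("disk check failed: " + e.getMessage());
        }
      }
    }
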
@@ -812,6 +812,42 @@ cleanup:
 #endif
 }
 
+/*
+ * Class:     org_apache_hadoop_io_nativeio_NativeIO_Windows
+ * Method:    access0
+ * Signature: (Ljava/lang/String;I)Z
+ */
+JNIEXPORT jboolean JNICALL Java_org_apache_hadoop_io_nativeio_NativeIO_00024Windows_access0
+  (JNIEnv *env, jclass clazz, jstring jpath, jint jaccess)
+{
+#ifdef UNIX
+  THROW(env, "java/io/IOException",
+    "The function access0(path, access) is not supported on Unix");
+  return NULL;
+#endif
+
+#ifdef WINDOWS
+  LPCWSTR path = NULL;
+  DWORD dwRtnCode = ERROR_SUCCESS;
+  ACCESS_MASK access = (ACCESS_MASK)jaccess;
+  BOOL allowed = FALSE;
+
+  path = (LPCWSTR) (*env)->GetStringChars(env, jpath, NULL);
+  if (!path) goto cleanup; // exception was thrown
+
+  dwRtnCode = CheckAccessForCurrentUser(path, access, &allowed);
+  if (dwRtnCode != ERROR_SUCCESS) {
+    throw_ioe(env, dwRtnCode);
+    goto cleanup;
+  }
+
+cleanup:
+  if (path) (*env)->ReleaseStringChars(env, jpath, path);
+
+  return (jboolean)allowed;
+#endif
+}
+
 JNIEXPORT void JNICALL
 Java_org_apache_hadoop_io_nativeio_NativeIO_renameTo0(JNIEnv *env,
 jclass clazz, jstring jsrc, jstring jdst)

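In the exported JNI symbol, _00024 is the escape sequence for '$', so this function binds to access0 on the nested class NativeIO.Windows declared in the Java hunk above:

    package org.apache.hadoop.io.nativeio;

    public class NativeIO {
      public static class Windows {
        // Maps to Java_org_apache_hadoop_io_nativeio_NativeIO_00024Windows_access0
        private static native boolean access0(String path, int requestedAccess);
      }
    }
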
@@ -110,6 +110,11 @@ void SystemInfoUsage();
 DWORD GetFileInformationByName(__in LPCWSTR pathName, __in BOOL followLink,
   __out LPBY_HANDLE_FILE_INFORMATION lpFileInformation);
 
+DWORD CheckAccessForCurrentUser(
+  __in PCWSTR pathName,
+  __in ACCESS_MASK requestedAccess,
+  __out BOOL *allowed);
+
 DWORD ConvertToLongPath(__in PCWSTR path, __deref_out PWSTR *newPath);
 
 DWORD GetSidFromAcctNameW(__in PCWSTR acctName, __out PSID* ppSid);

@@ -567,7 +567,7 @@ static DWORD GetEffectiveRightsForSid(PSECURITY_DESCRIPTOR psd,
   PSID pSid,
   PACCESS_MASK pAccessRights)
 {
-  AUTHZ_RESOURCE_MANAGER_HANDLE hManager;
+  AUTHZ_RESOURCE_MANAGER_HANDLE hManager = NULL;
   LUID unusedId = { 0 };
   AUTHZ_CLIENT_CONTEXT_HANDLE hAuthzClientContext = NULL;
   DWORD dwRtnCode = ERROR_SUCCESS;

@@ -581,6 +581,10 @@ static DWORD GetEffectiveRightsForSid(PSECURITY_DESCRIPTOR psd,
     return GetLastError();
   }
 
+  // Pass AUTHZ_SKIP_TOKEN_GROUPS to the function to avoid querying user group
+  // information for access check. This allows us to model POSIX permissions
+  // on Windows, where a user can have less permissions than a group it
+  // belongs to.
   if(!AuthzInitializeContextFromSid(AUTHZ_SKIP_TOKEN_GROUPS,
     pSid, hManager, NULL, unusedId, NULL, &hAuthzClientContext))
   {

@@ -594,16 +598,115 @@ static DWORD GetEffectiveRightsForSid(PSECURITY_DESCRIPTOR psd,
     ret = dwRtnCode;
     goto GetEffectiveRightsForSidEnd;
   }
-  if (!AuthzFreeContext(hAuthzClientContext))
-  {
-    ret = GetLastError();
-    goto GetEffectiveRightsForSidEnd;
-  }
 
 GetEffectiveRightsForSidEnd:
+  if (hManager != NULL)
+  {
+    (void)AuthzFreeResourceManager(hManager);
+  }
+  if (hAuthzClientContext != NULL)
+  {
+    (void)AuthzFreeContext(hAuthzClientContext);
+  }
+
   return ret;
 }
 
+//----------------------------------------------------------------------------
+// Function: CheckAccessForCurrentUser
+//
+// Description:
+//  Checks if the current process has the requested access rights on the given
+//  path. Based on the following MSDN article:
+//  http://msdn.microsoft.com/en-us/library/windows/desktop/ff394771(v=vs.85).aspx
+//
+// Returns:
+//  ERROR_SUCCESS: on success
+//
+DWORD CheckAccessForCurrentUser(
+  __in PCWSTR pathName,
+  __in ACCESS_MASK requestedAccess,
+  __out BOOL *allowed)
+{
+  DWORD dwRtnCode = ERROR_SUCCESS;
+
+  LPWSTR longPathName = NULL;
+  HANDLE hProcessToken = NULL;
+  PSECURITY_DESCRIPTOR pSd = NULL;
+
+  AUTHZ_RESOURCE_MANAGER_HANDLE hManager = NULL;
+  AUTHZ_CLIENT_CONTEXT_HANDLE hAuthzClientContext = NULL;
+  LUID Luid = {0, 0};
+
+  ACCESS_MASK currentUserAccessRights = 0;
+
+  // Prepend the long path prefix if needed
+  dwRtnCode = ConvertToLongPath(pathName, &longPathName);
+  if (dwRtnCode != ERROR_SUCCESS)
+  {
+    goto CheckAccessEnd;
+  }
+
+  // Get SD of the given path. OWNER and DACL security info must be
+  // requested, otherwise, AuthzAccessCheck fails with invalid parameter
+  // error.
+  dwRtnCode = GetNamedSecurityInfo(longPathName, SE_FILE_OBJECT,
+    OWNER_SECURITY_INFORMATION | GROUP_SECURITY_INFORMATION |
+    DACL_SECURITY_INFORMATION,
+    NULL, NULL, NULL, NULL, &pSd);
+  if (dwRtnCode != ERROR_SUCCESS)
+  {
+    goto CheckAccessEnd;
+  }
+
+  // Get current process token
+  if (!OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY, &hProcessToken))
+  {
+    dwRtnCode = GetLastError();
+    goto CheckAccessEnd;
+  }
+
+  if (!AuthzInitializeResourceManager(AUTHZ_RM_FLAG_NO_AUDIT, NULL, NULL,
+    NULL, NULL, &hManager))
+  {
+    dwRtnCode = GetLastError();
+    goto CheckAccessEnd;
+  }
+
+  if(!AuthzInitializeContextFromToken(0, hProcessToken, hManager, NULL,
+    Luid, NULL, &hAuthzClientContext))
+  {
+    dwRtnCode = GetLastError();
+    goto CheckAccessEnd;
+  }
+
+  dwRtnCode = GetAccess(hAuthzClientContext, pSd, &currentUserAccessRights);
+  if (dwRtnCode != ERROR_SUCCESS)
+  {
+    goto CheckAccessEnd;
+  }
+
+  *allowed = ((currentUserAccessRights & requestedAccess) == requestedAccess);
+
+CheckAccessEnd:
+  LocalFree(longPathName);
+  LocalFree(pSd);
+  if (hProcessToken != NULL)
+  {
+    CloseHandle(hProcessToken);
+  }
+  if (hManager != NULL)
+  {
+    (void)AuthzFreeResourceManager(hManager);
+  }
+  if (hAuthzClientContext != NULL)
+  {
+    (void)AuthzFreeContext(hAuthzClientContext);
+  }
+
+  return dwRtnCode;
+}
+
 //----------------------------------------------------------------------------
 // Function: FindFileOwnerAndPermission
 //

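The final comparison implements "all requested bits must be granted". The same mask test in Java, reusing the flag values from the AccessRight enum above (a standalone sketch, not Hadoop code):

    public class AccessMaskDemo {
      static final int ACCESS_READ = 0x0001;   // FILE_READ_DATA
      static final int ACCESS_WRITE = 0x0002;  // FILE_WRITE_DATA

      static boolean allowed(int granted, int requested) {
        // Access is allowed only if every requested bit is granted.
        return (granted & requested) == requested;
      }

      public static void main(String[] args) {
        int granted = ACCESS_READ;  // e.g. a read-only grant
        System.out.println(allowed(granted, ACCESS_READ));                 // true
        System.out.println(allowed(granted, ACCESS_READ | ACCESS_WRITE));  // false
      }
    }
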
@@ -353,15 +353,15 @@ public class TestFileUtil {
   }
 
   private static void grantPermissions(final File f) {
-    f.setReadable(true);
-    f.setWritable(true);
-    f.setExecutable(true);
+    FileUtil.setReadable(f, true);
+    FileUtil.setWritable(f, true);
+    FileUtil.setExecutable(f, true);
   }
 
   private static void revokePermissions(final File f) {
-    f.setWritable(false);
-    f.setExecutable(false);
-    f.setReadable(false);
+    FileUtil.setWritable(f, false);
+    FileUtil.setExecutable(f, false);
+    FileUtil.setReadable(f, false);
   }
 
   // Validates the return value.

@@ -61,7 +61,7 @@ public class TestLocalFileSystem {
 
   @After
   public void after() throws IOException {
-    base.setWritable(true);
+    FileUtil.setWritable(base, true);
     FileUtil.fullyDelete(base);
     assertTrue(!base.exists());
   }

@@ -279,7 +279,7 @@ public class TestLocalFileSystem {
     final File dir1 = new File(base, "dir1");
     final File dir2 = new File(dir1, "dir2");
     dir2.mkdirs();
-    assertTrue(dir2.exists() && dir2.canWrite());
+    assertTrue(dir2.exists() && FileUtil.canWrite(dir2));
 
     final String dataFileName = "corruptedData";
     final Path dataPath = new Path(new File(dir2, dataFileName).toURI());

@@ -302,7 +302,7 @@ public class TestLocalFileSystem {
     // this is a hack to force the #reportChecksumFailure() method to stop
     // climbing up at the 'base' directory and use 'dir1/bad_files' as the
     // corrupted files storage:
-    base.setWritable(false);
+    FileUtil.setWritable(base, false);
 
     FSDataInputStream dataFsdis = fileSys.open(dataPath);
     FSDataInputStream checksumFsdis = fileSys.open(checksumPath);

@@ -240,6 +240,44 @@ public class TestNativeIO {
 
   }
 
+  /** Validate access checks on Windows */
+  @Test (timeout = 30000)
+  public void testAccess() throws Exception {
+    if (!Path.WINDOWS) {
+      return;
+    }
+
+    File testFile = new File(TEST_DIR, "testfileaccess");
+    assertTrue(testFile.createNewFile());
+
+    // Validate ACCESS_READ
+    FileUtil.setReadable(testFile, false);
+    assertFalse(NativeIO.Windows.access(testFile.getAbsolutePath(),
+        NativeIO.Windows.AccessRight.ACCESS_READ));
+
+    FileUtil.setReadable(testFile, true);
+    assertTrue(NativeIO.Windows.access(testFile.getAbsolutePath(),
+        NativeIO.Windows.AccessRight.ACCESS_READ));
+
+    // Validate ACCESS_WRITE
+    FileUtil.setWritable(testFile, false);
+    assertFalse(NativeIO.Windows.access(testFile.getAbsolutePath(),
+        NativeIO.Windows.AccessRight.ACCESS_WRITE));
+
+    FileUtil.setWritable(testFile, true);
+    assertTrue(NativeIO.Windows.access(testFile.getAbsolutePath(),
+        NativeIO.Windows.AccessRight.ACCESS_WRITE));
+
+    // Validate ACCESS_EXECUTE
+    FileUtil.setExecutable(testFile, false);
+    assertFalse(NativeIO.Windows.access(testFile.getAbsolutePath(),
+        NativeIO.Windows.AccessRight.ACCESS_EXECUTE));
+
+    FileUtil.setExecutable(testFile, true);
+    assertTrue(NativeIO.Windows.access(testFile.getAbsolutePath(),
+        NativeIO.Windows.AccessRight.ACCESS_EXECUTE));
+  }
+
   @Test (timeout = 30000)
   public void testOpenMissingWithoutCreate() throws Exception {
     if (Path.WINDOWS) {

@@ -23,6 +23,7 @@ import java.io.IOException;
 import java.util.Random;
 
 import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.fs.FileUtil;
 
 /**
  * Create a temporary directory in which sockets can be created.

@@ -37,7 +38,7 @@ public class TemporarySocketDirectory implements Closeable {
     dir = new File(tmp, "socks." + (System.currentTimeMillis() +
         "." + (new Random().nextInt())));
     dir.mkdirs();
-    dir.setWritable(true, true);
+    FileUtil.setWritable(dir, true);
   }
 
   public File getDir() {

@@ -28,6 +28,8 @@ import java.lang.management.ManagementFactory;
 import java.lang.management.ThreadInfo;
 import java.lang.management.ThreadMXBean;
 
+import org.apache.hadoop.fs.FileUtil;
+
 public class TestShell extends TestCase {
 
   private static class Command extends Shell {

@@ -92,7 +94,7 @@ public class TestShell extends TestCase {
     PrintWriter writer = new PrintWriter(new FileOutputStream(shellFile));
     writer.println(timeoutCommand);
     writer.close();
-    shellFile.setExecutable(true);
+    FileUtil.setExecutable(shellFile, true);
     Shell.ShellCommandExecutor shexc
       = new Shell.ShellCommandExecutor(new String[]{shellFile.getAbsolutePath()},
                                        null, null, 100);

@@ -400,6 +400,9 @@ Release 2.0.5-beta - UNRELEASED
     HDFS-4705. Address HDFS test failures on Windows because of invalid
     dfs.namenode.name.dir. (Ivan Mitic via suresh)
 
+    HDFS-4734. HDFS Tests that use ShellCommandFencer are broken on Windows.
+    (Arpit Agarwal via suresh)
+
 Release 2.0.4-alpha - 2013-04-25
 
   INCOMPATIBLE CHANGES

@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.test.MockitoUtil;
+import org.apache.hadoop.util.Shell;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;

@@ -73,6 +74,17 @@ public class TestDFSHAAdmin {
   private static String HOST_A = "1.2.3.1";
   private static String HOST_B = "1.2.3.2";
 
+  // Fencer shell commands that always return true and false respectively
+  // on Unix.
+  private static String FENCER_TRUE_COMMAND_UNIX = "shell(true)";
+  private static String FENCER_FALSE_COMMAND_UNIX = "shell(false)";
+
+  // Fencer shell commands that always return true and false respectively
+  // on Windows. Lacking POSIX 'true' and 'false' commands we use the DOS
+  // commands 'rem' and 'help.exe'.
+  private static String FENCER_TRUE_COMMAND_WINDOWS = "shell(rem)";
+  private static String FENCER_FALSE_COMMAND_WINDOWS = "shell(help.exe /? >NUL)";
+
   private HdfsConfiguration getHAConf() {
     HdfsConfiguration conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_NAMESERVICES, NSID);

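A quick probe of the exit-code behavior these choices rely on: under cmd, 'rem' should exit 0 while 'help.exe /?' exits nonzero (a Windows-only sketch; the exit codes are the assumption stated in the comment above, not something this patch verifies):

    public class FencerCommandProbe {
      static int exitCode(String command) throws Exception {
        // cmd /C interprets the whole string, including the >NUL redirection.
        return new ProcessBuilder("cmd", "/C", command).start().waitFor();
      }

      public static void main(String[] args) throws Exception {
        System.out.println("rem         -> " + exitCode("rem"));              // expect 0
        System.out.println("help.exe /? -> " + exitCode("help.exe /? >NUL")); // expect nonzero
      }
    }
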
@@ -89,6 +101,16 @@ public class TestDFSHAAdmin {
     return conf;
   }
 
+  public static String getFencerTrueCommand() {
+    return Shell.WINDOWS ?
+        FENCER_TRUE_COMMAND_WINDOWS : FENCER_TRUE_COMMAND_UNIX;
+  }
+
+  public static String getFencerFalseCommand() {
+    return Shell.WINDOWS ?
+        FENCER_FALSE_COMMAND_WINDOWS : FENCER_FALSE_COMMAND_UNIX;
+  }
+
   @Before
   public void setup() throws IOException {
     mockProtocol = MockitoUtil.mockProtocol(HAServiceProtocol.class);

@@ -173,7 +195,7 @@ public class TestDFSHAAdmin {
     // Turn on auto-HA in the config
     HdfsConfiguration conf = getHAConf();
     conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
-    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
     tool.setConf(conf);
 
     // Should fail without the forcemanual flag

@@ -250,7 +272,7 @@ public class TestDFSHAAdmin {
   public void testFailoverWithFencerConfigured() throws Exception {
     Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
     HdfsConfiguration conf = getHAConf();
-    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
     tool.setConf(conf);
     assertEquals(0, runTool("-failover", "nn1", "nn2"));
   }

@@ -259,7 +281,7 @@ public class TestDFSHAAdmin {
   public void testFailoverWithFencerAndNameservice() throws Exception {
     Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
     HdfsConfiguration conf = getHAConf();
-    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
     tool.setConf(conf);
     assertEquals(0, runTool("-ns", "ns1", "-failover", "nn1", "nn2"));
   }

@@ -268,7 +290,7 @@ public class TestDFSHAAdmin {
   public void testFailoverWithFencerConfiguredAndForce() throws Exception {
     Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
     HdfsConfiguration conf = getHAConf();
-    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
     tool.setConf(conf);
     assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
   }

@@ -277,7 +299,7 @@ public class TestDFSHAAdmin {
   public void testFailoverWithForceActive() throws Exception {
     Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
     HdfsConfiguration conf = getHAConf();
-    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
     tool.setConf(conf);
     assertEquals(0, runTool("-failover", "nn1", "nn2", "--forceactive"));
   }

@@ -286,7 +308,7 @@ public class TestDFSHAAdmin {
   public void testFailoverWithInvalidFenceArg() throws Exception {
     Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
     HdfsConfiguration conf = getHAConf();
-    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
     tool.setConf(conf);
     assertEquals(-1, runTool("-failover", "nn1", "nn2", "notforcefence"));
   }

@@ -312,7 +334,7 @@ public class TestDFSHAAdmin {
     // Turn on auto-HA in the config
     HdfsConfiguration conf = getHAConf();
     conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
-    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
     tool.setConf(conf);
 
     assertEquals(0, runTool("-failover", "nn1", "nn2"));

@@ -323,7 +345,7 @@ public class TestDFSHAAdmin {
   public void testForceFenceOptionListedBeforeArgs() throws Exception {
     Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
     HdfsConfiguration conf = getHAConf();
-    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
     tool.setConf(conf);
     assertEquals(0, runTool("-failover", "--forcefence", "nn1", "nn2"));
   }

@@ -359,23 +381,23 @@ public class TestDFSHAAdmin {
 
     HdfsConfiguration conf = getHAConf();
     // Set the default fencer to succeed
-    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
     tool.setConf(conf);
     assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
 
     // Set the NN-specific fencer to fail. Should fail to fence.
-    conf.set(nnSpecificKey, "shell(false)");
+    conf.set(nnSpecificKey, getFencerFalseCommand());
     tool.setConf(conf);
     assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
     conf.unset(nnSpecificKey);
 
     // Set an NS-specific fencer to fail. Should fail.
-    conf.set(nsSpecificKey, "shell(false)");
+    conf.set(nsSpecificKey, getFencerFalseCommand());
     tool.setConf(conf);
     assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
 
     // Set the NS-specific fencer to succeed. Should succeed
-    conf.set(nsSpecificKey, "shell(true)");
+    conf.set(nsSpecificKey, getFencerTrueCommand());
     tool.setConf(conf);
     assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
   }

@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.util.Shell;
 import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Before;

@@ -114,7 +115,8 @@ public class TestDFSHAAdminMiniCluster {
 
   @Test
   public void testTryFailoverToSafeMode() throws Exception {
-    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
+             TestDFSHAAdmin.getFencerTrueCommand());
     tool.setConf(conf);
 
     NameNodeAdapter.enterSafeMode(cluster.getNameNode(0), false);

@@ -136,10 +138,17 @@ public class TestDFSHAAdminMiniCluster {
     // tmp file, so we can verify that the args were substituted right
     File tmpFile = File.createTempFile("testFencer", ".txt");
     tmpFile.deleteOnExit();
-    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
-        "shell(echo -n $target_nameserviceid.$target_namenodeid " +
-        "$target_port $dfs_ha_namenode_id > " +
-        tmpFile.getAbsolutePath() + ")");
+    if (Shell.WINDOWS) {
+      conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
+          "shell(echo %target_nameserviceid%.%target_namenodeid% " +
+          "%target_port% %dfs_ha_namenode_id% > " +
+          tmpFile.getAbsolutePath() + ")");
+    } else {
+      conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
+          "shell(echo -n $target_nameserviceid.$target_namenodeid " +
+          "$target_port $dfs_ha_namenode_id > " +
+          tmpFile.getAbsolutePath() + ")");
+    }
 
     // Test failover with fencer
     tool.setConf(conf);

@@ -156,9 +165,11 @@ public class TestDFSHAAdminMiniCluster {
     assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
 
     // The fence script should run with the configuration from the target
-    // node, rather than the configuration from the fencing node
-    assertEquals("minidfs-ns.nn1 " + nn1Port + " nn1",
-        Files.toString(tmpFile, Charsets.UTF_8));
+    // node, rather than the configuration from the fencing node. Strip
+    // out any trailing spaces and CR/LFs which may be present on Windows.
+    String fenceCommandOutput = Files.toString(tmpFile, Charsets.UTF_8).
+        replaceAll(" *[\r\n]+", "");
+    assertEquals("minidfs-ns.nn1 " + nn1Port + " nn1", fenceCommandOutput);
     tmpFile.delete();
 
     // Test failover with forceactive option

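The normalization is easy to check in isolation: cmd's echo emits a trailing space (before the redirect) plus CRLF, which the regex removes (a small sketch):

    public class FenceOutputNormalizer {
      public static void main(String[] args) {
        String unix = "minidfs-ns.nn1 1234 nn1";          // from echo -n
        String windows = "minidfs-ns.nn1 1234 nn1 \r\n";  // from cmd's echo
        // Strip trailing spaces and CR/LF sequences, as the test does.
        String cleaned = windows.replaceAll(" *[\r\n]+", "");
        System.out.println(cleaned.equals(unix));  // true
      }
    }
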
@@ -181,7 +192,8 @@ public class TestDFSHAAdminMiniCluster {
     assertFalse(tmpFile.exists());
 
     // Test failover with force fence listed before the other arguments
-    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
+        TestDFSHAAdmin.getFencerTrueCommand());
     tool.setConf(conf);
     assertEquals(0, runTool("-failover", "--forcefence", "nn1", "nn2"));
   }