HADOOP-8562. Merge r1464780 HDFS-4625, r1465869 YARN-557, r1466148 HDFS-4674, r1466306 HADOOP-9437, r1466746 YARN-487

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1485934 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 2013-05-24 05:34:41 +00:00
parent 0a96ae767f
commit 9b910c5c88
12 changed files with 83 additions and 17 deletions

View File

@@ -326,6 +326,10 @@ Release 2.0.5-beta - UNRELEASED
HADOOP-9353. Activate native-win maven profile by default on Windows.
(Arpit Agarwal via szetszwo)
HADOOP-9437. TestNativeIO#testRenameTo fails on Windows due to assumption
that POSIX errno is embedded in NativeIOException. (Chris Nauroth via
suresh)
Release 2.0.4-beta - UNRELEASED
INCOMPATIBLE CHANGES

View File

@@ -816,6 +816,7 @@ JNIEXPORT void JNICALL
Java_org_apache_hadoop_io_nativeio_NativeIO_renameTo0(JNIEnv *env,
jclass clazz, jstring jsrc, jstring jdst)
{
#ifdef UNIX
const char *src = NULL, *dst = NULL;
src = (*env)->GetStringUTFChars(env, jsrc, NULL);
@@ -829,6 +830,23 @@ jclass clazz, jstring jsrc, jstring jdst)
done:
if (src) (*env)->ReleaseStringUTFChars(env, jsrc, src);
if (dst) (*env)->ReleaseStringUTFChars(env, jdst, dst);
#endif
#ifdef WINDOWS
LPCWSTR src = NULL, dst = NULL;
src = (LPCWSTR) (*env)->GetStringChars(env, jsrc, NULL);
if (!src) goto done; // exception was thrown
dst = (LPCWSTR) (*env)->GetStringChars(env, jdst, NULL);
if (!dst) goto done; // exception was thrown
if (!MoveFile(src, dst)) {
throw_ioe(env, GetLastError());
}
done:
if (src) (*env)->ReleaseStringChars(env, jsrc, src);
if (dst) (*env)->ReleaseStringChars(env, jdst, dst);
#endif
}
/**

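In the Windows branch added above, renameTo0 calls MoveFile and reports failures through throw_ioe(env, GetLastError()), so the NativeIOException seen by Java code carries a Win32 error message rather than a POSIX errno. The following is a minimal, hypothetical Java-side sketch of how a caller might attach context to that platform-specific detail; NativeIO.renameTo and NativeIOException are the real Hadoop APIs exercised in the test below, while the wrapper class and method are illustrative only.

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.NativeIOException;

// Hypothetical helper: wrap the native rename and add context to the
// platform-specific failure detail reported by the JNI layer above.
public final class RenameHelper {
  private RenameHelper() {}

  public static void renameOrThrow(File src, File dst) throws IOException {
    try {
      NativeIO.renameTo(src, dst);
    } catch (NativeIOException e) {
      // On UNIX the exception exposes a POSIX errno (e.getErrno());
      // on Windows the native code maps GetLastError() into the message.
      throw new IOException("Failed to rename " + src + " to " + dst
          + ": " + e.getMessage(), e);
    }
  }
}
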
View File

@@ -446,7 +446,13 @@ public class TestNativeIO {
NativeIO.renameTo(nonExistentFile, targetFile);
Assert.fail();
} catch (NativeIOException e) {
Assert.assertEquals(e.getErrno(), Errno.ENOENT);
if (Path.WINDOWS) {
Assert.assertEquals(
String.format("The system cannot find the file specified.%n"),
e.getMessage());
} else {
Assert.assertEquals(Errno.ENOENT, e.getErrno());
}
}
// Test renaming a file to itself. It should succeed and do nothing.
@@ -465,7 +471,13 @@ public class TestNativeIO {
NativeIO.renameTo(sourceFile, badTarget);
Assert.fail();
} catch (NativeIOException e) {
Assert.assertEquals(e.getErrno(), Errno.ENOTDIR);
if (Path.WINDOWS) {
Assert.assertEquals(
String.format("The parameter is incorrect.%n"),
e.getMessage());
} else {
Assert.assertEquals(Errno.ENOTDIR, e.getErrno());
}
}
FileUtils.deleteQuietly(TEST_DIR);

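Both test hunks above follow the same pattern: on Windows, compare against the localized Win32 error message; on other platforms, compare against the POSIX errno. A hedged sketch of how that check could be factored into a shared assertion helper; the helper itself is illustrative and not part of the patch.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.nativeio.Errno;
import org.apache.hadoop.io.nativeio.NativeIOException;
import org.junit.Assert;

// Hypothetical test helper consolidating the platform-conditional
// assertions used in testRenameTo above.
final class RenameAssertions {
  private RenameAssertions() {}

  static void assertRenameFailure(NativeIOException e,
      String expectedWindowsMessage, Errno expectedErrno) {
    if (Path.WINDOWS) {
      // Windows: the JNI layer reports the Win32 error text, not an errno.
      Assert.assertEquals(expectedWindowsMessage, e.getMessage());
    } else {
      // UNIX: the JNI layer embeds the POSIX errno.
      Assert.assertEquals(expectedErrno, e.getErrno());
    }
  }
}
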
View File

@@ -369,6 +369,13 @@ Release 2.0.5-beta - UNRELEASED
HDFS-4584. Skip TestNNWithQJM.testNewNamenodeTakesOverWriter() on Windows.
(Arpit Agarwal via szetszwo)
HDFS-4625. Make TestNNWithQJM#testNewNamenodeTakesOverWriter work on
Windows. (Ivan Mitic via suresh)
HDFS-4674. TestBPOfferService fails on Windows due to failure parsing
datanode data directory as URI. (Chris Nauroth via suresh)
Release 2.0.4-alpha - 2013-04-25
INCOMPATIBLE CHANGES

View File

@@ -98,9 +98,6 @@ public class TestNNWithQJM {
@Test (timeout = 30000)
public void testNewNamenodeTakesOverWriter() throws Exception {
// Skip the test on Windows. See HDFS-4584.
assumeTrue(!Path.WINDOWS);
File nn1Dir = new File(
MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image-nn1");
File nn2Dir = new File(
@@ -111,15 +108,19 @@ public class TestNNWithQJM {
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
mjc.getQuorumJournalURI("myjournal").toString());
// Start the cluster once to generate the dfs dirs
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(0)
.manageNameDfsDirs(false)
.checkExitOnShutdown(false)
.build();
try {
cluster.getFileSystem().mkdirs(TEST_PATH);
// Shutdown the cluster before making a copy of the namenode dir
// to release all file locks, otherwise, the copy will fail on
// some platforms.
cluster.shutdown();
try {
// Start a second NN pointed to the same quorum.
// We need to copy the image dir from the first NN -- or else
// the new NN will just be rejected because of Namespace mismatch.
@@ -127,6 +128,16 @@ public class TestNNWithQJM {
FileUtil.copy(nn1Dir, FileSystem.getLocal(conf).getRaw(),
new Path(nn2Dir.getAbsolutePath()), false, conf);
// Start the cluster again
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(0)
.format(false)
.manageNameDfsDirs(false)
.checkExitOnShutdown(false)
.build();
cluster.getFileSystem().mkdirs(TEST_PATH);
Configuration conf2 = new Configuration();
conf2.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
nn2Dir.getAbsolutePath());

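The reordered test now shuts the first cluster down before copying its name directory, because the copy fails on platforms (notably Windows) that keep the image files locked while the NameNode is running, and then rebuilds the cluster with format(false) so the copied image is reused. A self-contained sketch of that ordering, using a hypothetical helper method; the builder options and FileUtil.copy call mirror the test above.

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Hypothetical helper illustrating the ordering the test now relies on:
// stop the cluster (releasing image-file locks), copy the name dir, then
// restart with format(false) so the copied image is picked up.
final class NameDirCloneHelper {
  private NameDirCloneHelper() {}

  static MiniDFSCluster cloneNameDirAndRestart(MiniDFSCluster cluster,
      Configuration conf, File srcNameDir, File dstNameDir)
      throws IOException {
    // 1. Shut down first; on some platforms the copy fails while the
    //    image files are still locked by the running NameNode.
    cluster.shutdown();

    // 2. Copy the image directory so the new NN sees the same namespace.
    FileUtil.copy(srcNameDir, FileSystem.getLocal(conf).getRaw(),
        new Path(dstNameDir.getAbsolutePath()), false, conf);

    // 3. Restart without formatting so the copied image is reused.
    return new MiniDFSCluster.Builder(conf)
        .numDataNodes(0)
        .format(false)
        .manageNameDfsDirs(false)
        .checkExitOnShutdown(false)
        .build();
  }
}
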
View File

@@ -17,10 +17,12 @@
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Map;
@@ -68,6 +70,8 @@ public class TestBPOfferService {
TestBPOfferService.class);
private static final ExtendedBlock FAKE_BLOCK =
new ExtendedBlock(FAKE_BPID, 12345L);
private static final String TEST_BUILD_DATA = System.getProperty(
"test.build.data", "build/test/data");
static {
((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
@@ -90,6 +94,8 @@ public class TestBPOfferService {
mockDn = Mockito.mock(DataNode.class);
Mockito.doReturn(true).when(mockDn).shouldRun();
Configuration conf = new Configuration();
File dnDataDir = new File(new File(TEST_BUILD_DATA, "dfs"), "data");
conf.set(DFS_DATANODE_DATA_DIR_KEY, dnDataDir.toURI().toString());
Mockito.doReturn(conf).when(mockDn).getConf();
Mockito.doReturn(new DNConf(conf)).when(mockDn).getDnConf();
Mockito.doReturn(DataNodeMetrics.create(conf, "fake dn"))

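The fix sets dfs.datanode.data.dir from File.toURI(), because a bare Windows path such as C:\build\test\data is not a valid URI and fails to parse. A minimal sketch of the same idea; the helper class and the generic key parameter are illustrative only.

import java.io.File;
import org.apache.hadoop.conf.Configuration;

// Sketch: hand directory-list configuration values to Hadoop as file: URIs
// so they parse on every platform (a raw "C:\..." string is not a valid URI).
public final class DataDirConfig {
  private DataDirConfig() {}

  public static void setDataDir(Configuration conf, String key, File dir) {
    // File.toURI() yields e.g. file:/C:/build/test/data/dfs/data on Windows
    // and file:/home/user/build/test/data/dfs/data on Linux.
    conf.set(key, dir.toURI().toString());
  }
}
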
View File

@@ -376,6 +376,12 @@ Release 2.0.5-beta - UNRELEASED
YARN-491. TestContainerLogsPage fails on Windows. (Chris Nauroth via hitesh)
YARN-557. Fix TestUnmanagedAMLauncher failure on Windows. (Chris Nauroth via
vinodkv)
YARN-487. Modify path manipulation in LocalDirsHandlerService to let
TestDiskFailures pass on Windows. (Chris Nauroth via vinodkv)
Release 2.0.4-alpha - 2013-04-25
INCOMPATIBLE CHANGES

View File

@@ -73,7 +73,7 @@ public interface ApplicationConstants {
* $USER
* Final, non-modifiable.
*/
USER("USER"),
USER(Shell.WINDOWS ? "USERNAME": "USER"),
/**
* $LOGNAME

View File

@@ -699,7 +699,7 @@ public class ApplicationMaster {
.newRecord(ContainerLaunchContext.class);
String jobUserName = System.getenv(ApplicationConstants.Environment.USER
.name());
.key());
ctx.setUser(jobUserName);
LOG.info("Setting user in ContainerLaunchContext to: " + jobUserName);

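Together, the two hunks above make Environment.USER resolve to the platform's real variable name (USERNAME on Windows) and switch the lookup from name(), which always yields the enum constant "USER", to key(), which yields the variable name System.getenv() actually needs. A small illustrative sketch, assuming only the real Environment enum; the wrapper class is hypothetical.

import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;

// Sketch: resolve the submitting user's name portably.
public final class UserEnv {
  private UserEnv() {}

  public static String currentUser() {
    // name() always returns the enum constant "USER"; key() returns the
    // platform-specific variable name ("USERNAME" on Windows, "USER"
    // elsewhere), which is what System.getenv() needs.
    return System.getenv(Environment.USER.key());
  }
}
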
View File

@@ -30,6 +30,7 @@ import junit.framework.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.MiniYARNCluster;
import org.junit.AfterClass;
@@ -50,7 +51,7 @@ public class TestUnmanagedAMLauncher {
conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 128);
if (yarnCluster == null) {
yarnCluster = new MiniYARNCluster(
TestUnmanagedAMLauncher.class.getName(), 1, 1, 1);
TestUnmanagedAMLauncher.class.getSimpleName(), 1, 1, 1);
yarnCluster.init(conf);
yarnCluster.start();
URL url = Thread.currentThread().getContextClassLoader()
@@ -93,7 +94,7 @@
return envClassPath;
}
@Test(timeout=10000)
@Test(timeout=30000)
public void testDSShell() throws Exception {
String classpath = getTestRuntimeClasspath();
String javaHome = System.getenv("JAVA_HOME");
@@ -112,7 +113,8 @@
javaHome
+ "/bin/java -Xmx512m "
+ "org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster "
+ "--container_memory 128 --num_containers 1 --priority 0 --shell_command ls" };
+ "--container_memory 128 --num_containers 1 --priority 0 "
+ "--shell_command " + (Shell.WINDOWS ? "dir" : "ls") };
LOG.info("Initializing Launcher");
UnmanagedAMLauncher launcher = new UnmanagedAMLauncher(new Configuration(

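The launched shell command is now chosen per platform, since the Windows command processor has no ls built-in. A one-method sketch of the same pattern; Shell.WINDOWS is the real Hadoop flag imported above, and the wrapper class is illustrative only.

import org.apache.hadoop.util.Shell;

// Sketch: pick a command that exists on the target platform's shell.
public final class PlatformCommands {
  private PlatformCommands() {}

  public static String listCommand() {
    // cmd.exe has no "ls"; use "dir" there and "ls" on UNIX-like systems.
    return Shell.WINDOWS ? "dir" : "ls";
  }
}
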
View File

@@ -307,7 +307,7 @@ public class LocalDirsHandlerService extends AbstractService {
URI uriPath = (new Path(paths[i])).toUri();
if (uriPath.getScheme() == null
|| uriPath.getScheme().equals(FILE_SCHEME)) {
validPaths.add(uriPath.getPath());
validPaths.add(new Path(uriPath.getPath()).toString());
} else {
LOG.warn(paths[i] + " is not a valid path. Path should be with "
+ FILE_SCHEME + " scheme or without scheme");

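LocalDirsHandlerService now passes the raw URI path through org.apache.hadoop.fs.Path before storing it, so the strings kept in validPaths are in Path's normalized form; TestDiskFailures below normalizes its expected values the same way before comparing. A small sketch of that normalization, with an illustrative input and a hypothetical helper class.

import org.apache.hadoop.fs.Path;

// Sketch: normalize a configured local-dir entry the same way
// LocalDirsHandlerService now does, so later string comparisons (as in
// TestDiskFailures) see a single canonical form on every platform.
public final class DirNormalizer {
  private DirNormalizer() {}

  public static String normalize(String configuredDir) {
    // new Path(...).toString() collapses redundant separators and keeps
    // Windows drive-letter paths in a consistent form.
    return new Path(configuredDir).toString();
  }

  public static void main(String[] args) {
    // Illustrative input only; actual values depend on the platform.
    System.out.println(normalize("/tmp//nm-local-dir/"));
  }
}
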
View File

@@ -133,10 +133,10 @@ public class TestDiskFailures {
dirSvc.init(conf);
List<String> localDirs = dirSvc.getLocalDirs();
Assert.assertEquals(1, localDirs.size());
Assert.assertEquals(localDir2, localDirs.get(0));
Assert.assertEquals(new Path(localDir2).toString(), localDirs.get(0));
List<String> logDirs = dirSvc.getLogDirs();
Assert.assertEquals(1, logDirs.size());
Assert.assertEquals(logDir1, logDirs.get(0));
Assert.assertEquals(new Path(logDir1).toString(), logDirs.get(0));
}
private void testDirsFailures(boolean localORLogDirs) throws IOException {