HADOOP-8662. Merge r1455637 for HADOOP-9388, r1455956 for HDFS-4593, r1456060 for HDFS-4582 and r1457057 for HDFS-4603

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1485907 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Suresh Srinivas 2013-05-24 00:42:28 +00:00
parent 830909a910
commit a6c4b42353
6 changed files with 47 additions and 29 deletions

View File

@ -315,6 +315,11 @@ Release 2.0.5-beta - UNRELEASED
HADOOP-9364. PathData#expandAsGlob does not return correct results for HADOOP-9364. PathData#expandAsGlob does not return correct results for
absolute paths on Windows. (Ivan Mitic via suresh) absolute paths on Windows. (Ivan Mitic via suresh)
HADOOP-8973. DiskChecker cannot reliably detect an inaccessible disk on
Windows with NTFS ACLs. (Chris Nauroth via suresh)
HADOOP-9388. TestFsShellCopy fails on Windows. (Ivan Mitic via suresh)
Release 2.0.4-beta - UNRELEASED Release 2.0.4-beta - UNRELEASED
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES

View File

@ -18,7 +18,6 @@
package org.apache.hadoop.fs.shell; package org.apache.hadoop.fs.shell;
import java.io.File;
import java.io.IOException; import java.io.IOException;
import java.io.InputStream; import java.io.InputStream;
import java.net.URI; import java.net.URI;
@ -235,7 +234,13 @@ protected PathData getTargetPath(PathData src) throws IOException {
*/ */
protected void copyFileToTarget(PathData src, PathData target) throws IOException { protected void copyFileToTarget(PathData src, PathData target) throws IOException {
src.fs.setVerifyChecksum(verifyChecksum); src.fs.setVerifyChecksum(verifyChecksum);
copyStreamToTarget(src.fs.open(src.path), target); InputStream in = null;
try {
in = src.fs.open(src.path);
copyStreamToTarget(in, target);
} finally {
IOUtils.closeStream(in);
}
if(preserve) { if(preserve) {
target.fs.setTimes( target.fs.setTimes(
target.path, target.path,

View File

@ -352,6 +352,12 @@ Release 2.0.5-beta - UNRELEASED
HDFS-4287. HTTPFS tests fail on Windows. (Chris Nauroth via suresh) HDFS-4287. HTTPFS tests fail on Windows. (Chris Nauroth via suresh)
HDFS-4593. TestSaveNamespace fails on Windows. (Arpit Agarwal via suresh)
HDFS-4582. TestHostsFiles fails on Windows. (Ivan Mitic via suresh)
HDFS-4603. TestMiniDFSCluster fails on Windows. (Ivan Mitic via suresh)
Release 2.0.4-alpha - 2013-04-25 Release 2.0.4-alpha - 2013-04-25
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES

View File

@ -65,7 +65,7 @@ public void tearDown() {
* *
* @throws Throwable on a failure * @throws Throwable on a failure
*/ */
@Test @Test(timeout=100000)
public void testClusterWithoutSystemProperties() throws Throwable { public void testClusterWithoutSystemProperties() throws Throwable {
System.clearProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA); System.clearProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA);
Configuration conf = new HdfsConfiguration(); Configuration conf = new HdfsConfiguration();
@ -74,7 +74,8 @@ public void testClusterWithoutSystemProperties() throws Throwable {
conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c1Path); conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c1Path);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try { try {
assertEquals(c1Path+"/data", cluster.getDataDirectory()); assertEquals(new File(c1Path + "/data"),
new File(cluster.getDataDirectory()));
} finally { } finally {
cluster.shutdown(); cluster.shutdown();
} }
@ -84,7 +85,7 @@ public void testClusterWithoutSystemProperties() throws Throwable {
* Bring up two clusters and assert that they are in different directories. * Bring up two clusters and assert that they are in different directories.
* @throws Throwable on a failure * @throws Throwable on a failure
*/ */
@Test @Test(timeout=100000)
public void testDualClusters() throws Throwable { public void testDualClusters() throws Throwable {
File testDataCluster2 = new File(testDataPath, CLUSTER_2); File testDataCluster2 = new File(testDataPath, CLUSTER_2);
File testDataCluster3 = new File(testDataPath, CLUSTER_3); File testDataCluster3 = new File(testDataPath, CLUSTER_3);
@ -95,7 +96,7 @@ public void testDualClusters() throws Throwable {
MiniDFSCluster cluster3 = null; MiniDFSCluster cluster3 = null;
try { try {
String dataDir2 = cluster2.getDataDirectory(); String dataDir2 = cluster2.getDataDirectory();
assertEquals(c2Path + "/data", dataDir2); assertEquals(new File(c2Path + "/data"), new File(dataDir2));
//change the data dir //change the data dir
conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
testDataCluster3.getAbsolutePath()); testDataCluster3.getAbsolutePath());

View File

@ -120,12 +120,13 @@ public void testHostsExcludeDfshealthJsp() throws Exception {
InetSocketAddress nnHttpAddress = cluster.getNameNode().getHttpAddress(); InetSocketAddress nnHttpAddress = cluster.getNameNode().getHttpAddress();
LOG.info("nnaddr = '" + nnHttpAddress + "'"); LOG.info("nnaddr = '" + nnHttpAddress + "'");
URL nnjsp = new URL("http://" + nnHttpAddress.getHostName() + ":" + nnHttpAddress.getPort() + "/dfshealth.jsp"); String nnHostName = nnHttpAddress.getHostName();
URL nnjsp = new URL("http://" + nnHostName + ":" + nnHttpAddress.getPort() + "/dfshealth.jsp");
LOG.info("fetching " + nnjsp); LOG.info("fetching " + nnjsp);
String dfshealthPage = StringEscapeUtils.unescapeHtml(DFSTestUtil.urlGet(nnjsp)); String dfshealthPage = StringEscapeUtils.unescapeHtml(DFSTestUtil.urlGet(nnjsp));
LOG.info("got " + dfshealthPage); LOG.info("got " + dfshealthPage);
assertTrue("dfshealth should contain localhost, got:" + dfshealthPage, assertTrue("dfshealth should contain " + nnHostName + ", got:" + dfshealthPage,
dfshealthPage.contains("localhost")); dfshealthPage.contains(nnHostName));
} finally { } finally {
cluster.shutdown(); cluster.shutdown();

View File

@ -41,6 +41,7 @@
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
@ -219,7 +220,7 @@ private void saveNamespaceWithInjectedFault(Fault fault) throws Exception {
* Verify that a saveNamespace command brings faulty directories * Verify that a saveNamespace command brings faulty directories
* in fs.name.dir and fs.edit.dir back online. * in fs.name.dir and fs.edit.dir back online.
*/ */
@Test @Test (timeout=30000)
public void testReinsertnamedirsInSavenamespace() throws Exception { public void testReinsertnamedirsInSavenamespace() throws Exception {
// create a configuration with the key to restore error // create a configuration with the key to restore error
// directories in fs.name.dir // directories in fs.name.dir
@ -237,10 +238,13 @@ public void testReinsertnamedirsInSavenamespace() throws Exception {
FSImage spyImage = spy(originalImage); FSImage spyImage = spy(originalImage);
fsn.dir.fsImage = spyImage; fsn.dir.fsImage = spyImage;
FileSystem fs = FileSystem.getLocal(conf);
File rootDir = storage.getStorageDir(0).getRoot(); File rootDir = storage.getStorageDir(0).getRoot();
rootDir.setExecutable(false); Path rootPath = new Path(rootDir.getPath(), "current");
rootDir.setWritable(false); final FsPermission permissionNone = new FsPermission((short) 0);
rootDir.setReadable(false); final FsPermission permissionAll = new FsPermission(
FsAction.ALL, FsAction.READ_EXECUTE, FsAction.READ_EXECUTE);
fs.setPermission(rootPath, permissionNone);
try { try {
doAnEdit(fsn, 1); doAnEdit(fsn, 1);
@ -257,9 +261,7 @@ public void testReinsertnamedirsInSavenamespace() throws Exception {
" bad directories.", " bad directories.",
storage.getRemovedStorageDirs().size() == 1); storage.getRemovedStorageDirs().size() == 1);
rootDir.setExecutable(true); fs.setPermission(rootPath, permissionAll);
rootDir.setWritable(true);
rootDir.setReadable(true);
// The next call to savenamespace should try inserting the // The next call to savenamespace should try inserting the
// erroneous directory back to fs.name.dir. This command should // erroneous directory back to fs.name.dir. This command should
@ -290,9 +292,7 @@ public void testReinsertnamedirsInSavenamespace() throws Exception {
LOG.info("Reloaded image is good."); LOG.info("Reloaded image is good.");
} finally { } finally {
if (rootDir.exists()) { if (rootDir.exists()) {
rootDir.setExecutable(true); fs.setPermission(rootPath, permissionAll);
rootDir.setWritable(true);
rootDir.setReadable(true);
} }
if (fsn != null) { if (fsn != null) {
@ -305,27 +305,27 @@ public void testReinsertnamedirsInSavenamespace() throws Exception {
} }
} }
@Test @Test (timeout=30000)
public void testRTEWhileSavingSecondImage() throws Exception { public void testRTEWhileSavingSecondImage() throws Exception {
saveNamespaceWithInjectedFault(Fault.SAVE_SECOND_FSIMAGE_RTE); saveNamespaceWithInjectedFault(Fault.SAVE_SECOND_FSIMAGE_RTE);
} }
@Test @Test (timeout=30000)
public void testIOEWhileSavingSecondImage() throws Exception { public void testIOEWhileSavingSecondImage() throws Exception {
saveNamespaceWithInjectedFault(Fault.SAVE_SECOND_FSIMAGE_IOE); saveNamespaceWithInjectedFault(Fault.SAVE_SECOND_FSIMAGE_IOE);
} }
@Test @Test (timeout=30000)
public void testCrashInAllImageDirs() throws Exception { public void testCrashInAllImageDirs() throws Exception {
saveNamespaceWithInjectedFault(Fault.SAVE_ALL_FSIMAGES); saveNamespaceWithInjectedFault(Fault.SAVE_ALL_FSIMAGES);
} }
@Test @Test (timeout=30000)
public void testCrashWhenWritingVersionFiles() throws Exception { public void testCrashWhenWritingVersionFiles() throws Exception {
saveNamespaceWithInjectedFault(Fault.WRITE_STORAGE_ALL); saveNamespaceWithInjectedFault(Fault.WRITE_STORAGE_ALL);
} }
@Test @Test (timeout=30000)
public void testCrashWhenWritingVersionFileInOneDir() throws Exception { public void testCrashWhenWritingVersionFileInOneDir() throws Exception {
saveNamespaceWithInjectedFault(Fault.WRITE_STORAGE_ONE); saveNamespaceWithInjectedFault(Fault.WRITE_STORAGE_ONE);
} }
@ -337,7 +337,7 @@ public void testCrashWhenWritingVersionFileInOneDir() throws Exception {
* failed checkpoint since it only affected ".ckpt" files, not * failed checkpoint since it only affected ".ckpt" files, not
* valid image files * valid image files
*/ */
@Test @Test (timeout=30000)
public void testFailedSaveNamespace() throws Exception { public void testFailedSaveNamespace() throws Exception {
doTestFailedSaveNamespace(false); doTestFailedSaveNamespace(false);
} }
@ -347,7 +347,7 @@ public void testFailedSaveNamespace() throws Exception {
* the operator restores the directories and calls it again. * the operator restores the directories and calls it again.
* This should leave the NN in a clean state for next start. * This should leave the NN in a clean state for next start.
*/ */
@Test @Test (timeout=30000)
public void testFailedSaveNamespaceWithRecovery() throws Exception { public void testFailedSaveNamespaceWithRecovery() throws Exception {
doTestFailedSaveNamespace(true); doTestFailedSaveNamespace(true);
} }
@ -421,7 +421,7 @@ public void doTestFailedSaveNamespace(boolean restoreStorageAfterFailure)
} }
} }
@Test @Test (timeout=30000)
public void testSaveWhileEditsRolled() throws Exception { public void testSaveWhileEditsRolled() throws Exception {
Configuration conf = getConf(); Configuration conf = getConf();
NameNode.initMetrics(conf, NamenodeRole.NAMENODE); NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
@ -457,7 +457,7 @@ public void testSaveWhileEditsRolled() throws Exception {
} }
} }
@Test @Test (timeout=30000)
public void testTxIdPersistence() throws Exception { public void testTxIdPersistence() throws Exception {
Configuration conf = getConf(); Configuration conf = getConf();
NameNode.initMetrics(conf, NamenodeRole.NAMENODE); NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
@ -603,7 +603,7 @@ public Void call() throws Exception {
} }
} }
@Test @Test (timeout=30000)
public void testSaveNamespaceWithDanglingLease() throws Exception { public void testSaveNamespaceWithDanglingLease() throws Exception {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration()) MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
.numDataNodes(1).build(); .numDataNodes(1).build();