Merge branch 'trunk' into HDFS-6581

commit e9edafc731
@@ -545,6 +545,9 @@ Release 2.6.0 - UNRELEASED
 
     HADOOP-11009. Add Timestamp Preservation to DistCp (Gary Steelman via aw)
 
+    HADOOP-11101. Move the InputStream close statement from the catch block to
+    the finally block in FileContext#copy(). (skrho via vinayakumarb)
+
   OPTIMIZATIONS
 
     HADOOP-10838. Byte array native checksumming. (James Thomas via todd)
@@ -2021,10 +2021,9 @@ public final class FileContext {
               EnumSet.of(CreateFlag.CREATE);
           out = create(qDst, createFlag);
           IOUtils.copyBytes(in, out, conf, true);
-        } catch (IOException e) {
+        } finally {
           IOUtils.closeStream(out);
           IOUtils.closeStream(in);
-          throw e;
         }
       }
       if (deleteSource) {
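The hunk above is the HADOOP-11101 change: stream cleanup moves out of the catch block (where it only ran on failure) into a finally block, so both streams are closed on success as well, and any IOException still propagates to the caller. Below is a minimal standalone sketch of the same pattern; it is not the FileContext code itself, and the file arguments and buffer size are illustrative. Note that FileContext uses IOUtils.closeStream, which swallows exceptions thrown during close, whereas this sketch uses plain close().

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

public class CloseInFinallyExample {
  // Copy src to dst; close both streams in finally so cleanup happens on
  // every path, instead of only in a catch block as the old code did.
  static void copy(File src, File dst) throws IOException {
    InputStream in = null;
    OutputStream out = null;
    try {
      in = new FileInputStream(src);
      out = new FileOutputStream(dst);
      byte[] buf = new byte[4096];
      int n;
      while ((n = in.read(buf)) != -1) {
        out.write(buf, 0, n);
      }
    } finally {
      // Unlike IOUtils.closeStream, plain close() can itself throw.
      if (out != null) { out.close(); }
      if (in != null) { in.close(); }
    }
  }

  public static void main(String[] args) throws IOException {
    copy(new File(args[0]), new File(args[1]));
  }
}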
@@ -582,6 +582,9 @@ Release 2.6.0 - UNRELEASED
 
     HDFS-7138. Fix hftp to work with encryption. (clamb via wang)
 
+    HDFS-7118. Improve diagnostics on storage directory rename operations by
+    using NativeIO#renameTo in Storage#rename. (cnauroth)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
@@ -36,6 +36,8 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.io.nativeio.NativeIO;
+import org.apache.hadoop.io.nativeio.NativeIOException;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.util.VersionInfo;
@@ -986,9 +988,13 @@ public abstract class Storage extends StorageInfo {
   }
 
   public static void rename(File from, File to) throws IOException {
-    if (!from.renameTo(to))
-      throw new IOException("Failed to rename "
-                            + from.getCanonicalPath() + " to " + to.getCanonicalPath());
+    try {
+      NativeIO.renameTo(from, to);
+    } catch (NativeIOException e) {
+      throw new IOException("Failed to rename " + from.getCanonicalPath()
+          + " to " + to.getCanonicalPath() + " due to failure in native rename. "
+          + e.toString());
+    }
   }
 
   /**
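The HDFS-7118 change above replaces java.io.File#renameTo, which reports failure only as a boolean, with NativeIO#renameTo, whose NativeIOException typically carries the operating-system error text, so the wrapping IOException says why the rename failed. A rough sketch of the difference follows; the paths are hypothetical, while the NativeIO calls are the same Hadoop API shown in the hunk.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.NativeIOException;

public class RenameDiagnostics {
  public static void main(String[] args) throws IOException {
    File from = new File("/tmp/current");       // hypothetical paths
    File to = new File("/tmp/previous.tmp");

    // Old behavior: a bare boolean, no indication of why the rename failed
    // (permissions, missing source, cross-device link, ...).
    if (!from.renameTo(to)) {
      System.err.println("rename failed, reason unknown");
    }

    // New behavior: the native rename surfaces the OS error in the exception,
    // which is what the updated Storage#rename wraps into its IOException.
    try {
      NativeIO.renameTo(from, to);
    } catch (NativeIOException e) {
      System.err.println("rename failed: " + e);  // includes the OS error detail
    }
  }
}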
@@ -21,6 +21,7 @@ import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_FAIL;
 import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_ROLL;
 import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.setupEdits;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
@@ -42,8 +43,11 @@ import org.apache.hadoop.hdfs.server.namenode.JournalManager.CorruptionException
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.NativeCodeLoader;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.ExpectedException;
 
 import com.google.common.base.Joiner;
 import com.google.common.collect.ImmutableList;
@@ -59,6 +63,9 @@ public class TestFileJournalManager {
     EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
   }
 
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
+
   @Before
   public void setUp() {
     conf = new Configuration();
@@ -472,6 +479,36 @@ public class TestFileJournalManager {
     }
   }
 
+  /**
+   * Tests that internal renames are done using native code on platforms that
+   * have it. The native rename includes more detailed information about the
+   * failure, which can be useful for troubleshooting.
+   */
+  @Test
+  public void testDoPreUpgradeIOError() throws IOException {
+    File storageDir = new File(TestEditLog.TEST_DIR, "preupgradeioerror");
+    List<URI> editUris = Collections.singletonList(storageDir.toURI());
+    NNStorage storage = setupEdits(editUris, 5);
+    StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
+    assertNotNull(sd);
+    // Change storage directory so that renaming current to previous.tmp fails.
+    FileUtil.setWritable(storageDir, false);
+    FileJournalManager jm = null;
+    try {
+      jm = new FileJournalManager(conf, sd, storage);
+      exception.expect(IOException.class);
+      if (NativeCodeLoader.isNativeCodeLoaded()) {
+        exception.expectMessage("failure in native rename");
+      }
+      jm.doPreUpgrade();
+    } finally {
+      IOUtils.cleanup(LOG, jm);
+      // Restore permissions on storage directory and make sure we can delete.
+      FileUtil.setWritable(storageDir, true);
+      FileUtil.fullyDelete(storageDir);
+    }
+  }
+
   private static String getLogsAsString(
       FileJournalManager fjm, long firstTxId) throws IOException {
     return Joiner.on(",").join(fjm.getRemoteEditLogs(firstTxId, false));
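One detail of the new test worth noting: with JUnit 4's ExpectedException rule, expect() and expectMessage() must be called before the statement that is supposed to throw, and expectMessage() is a substring match, which is why the test only registers the "failure in native rename" expectation when NativeCodeLoader reports the native library is loaded. A minimal, self-contained sketch of that pattern follows; the test class and the thrown message here are made up for illustration.

import java.io.IOException;

import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;

public class ExpectedExceptionSketch {
  @Rule
  public ExpectedException exception = ExpectedException.none();

  @Test
  public void failsWithDetailedMessage() throws IOException {
    // Expectations are registered up front; expectMessage matches a substring.
    exception.expect(IOException.class);
    exception.expectMessage("native rename");

    // The statement expected to throw comes last; if it does not throw,
    // the rule fails the test.
    throw new IOException("Failed to rename a to b due to failure in native rename.");
  }
}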
@@ -222,6 +222,8 @@ Trunk (Unreleased)
 
     MAPREDUCE-6077. native-task: Remove CustomModule examples in nativetask (seanzhong)
 
+    MAPREDUCE-6078. native-task: fix gtest build on macosx (Binglin Chang)
+
 Release 2.6.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -249,11 +249,6 @@ add_executable(nttest
   ${D}/test/test_commons.cc)
 
 
-IF (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
-  # macos clang with libc++ does not have tr1/tuple, just tuple
-  SET_TARGET_PROPERTIES(nttest PROPERTIES COMPILE_FLAGS "-DGTEST_USE_OWN_TR1_TUPLE=1")
-ENDIF()
-
 target_link_libraries(nttest
   nativetask_static
   gtest
File diff suppressed because it is too large
@@ -1,39 +0,0 @@
-// Copyright 2006, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <iostream>
-
-#include "gtest/gtest.h"
-
-GTEST_API_ int main(int argc, char **argv) {
-  std::cout << "Running main() from gtest_main.cc\n";
-
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
File diff suppressed because it is too large