HDFS-8480. Fix performance and timeout issues in HDFS-7929 by using hard-links to preserve old edit logs, instead of copying them. (Zhe Zhang via Colin P. McCabe)

(cherry picked from commit 7b424f938c)
commit cbd11681ce
parent c71f57b2a0
Author: Colin Patrick Mccabe
Date:   2015-06-22 14:37:10 -07:00

3 changed files with 48 additions and 31 deletions
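In outline: the old upgrade-preparation loop in NNUpgradeUtil preserved each edit log by replaying every op into a freshly created copy and syncing it to disk, which costs O(file size) per log and, per the summary above, caused the performance and timeout problems; the new code creates a hard link instead, a constant-time metadata operation. A minimal standalone sketch of the difference using java.nio.file directly (paths hypothetical, not the NameNode code itself):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class LinkVsCopy {
  public static void main(String[] args) throws IOException {
    Path src  = Paths.get("/tmp/edits_0000001-0000100"); // hypothetical edit log
    Path copy = Paths.get("/tmp/edits.copy");
    Path link = Paths.get("/tmp/edits.link");

    // Roughly what the old code did (plus per-op validation and fsync):
    // every byte is read and rewritten, so cost grows with file size.
    Files.copy(src, copy);

    // What the new code does: add a second directory entry pointing at the
    // same inode; no file data is read or written, regardless of size.
    Files.createLink(link, src);
  }
}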

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -637,6 +637,9 @@ Release 2.7.1 - UNRELEASED
     HDFS-7164. Feature documentation for HDFS-6581. (Arpit Agarwal)
 
   OPTIMIZATIONS
 
+    HDFS-8480. Fix performance and timeout issues in HDFS-7929 by using
+    hard-links to preserve old edit logs, instead of copying them. (Zhe Zhang
+    via Colin P. McCabe)
 
   BUG FIXES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.File;
 import java.io.FilenameFilter;
 import java.io.IOException;
+import java.nio.file.Files;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
@@ -127,23 +128,8 @@ public abstract class NNUpgradeUtil {
     for (String s : fileNameList) {
       File prevFile = new File(tmpDir, s);
-      Preconditions.checkState(prevFile.canRead(),
-          "Edits log file " + s + " is not readable.");
       File newFile = new File(curDir, prevFile.getName());
-      Preconditions.checkState(newFile.createNewFile(),
-          "Cannot create new edits log file in " + curDir);
-      EditLogFileInputStream in = new EditLogFileInputStream(prevFile);
-      EditLogFileOutputStream out =
-          new EditLogFileOutputStream(conf, newFile, 512*1024);
-      FSEditLogOp logOp = in.nextValidOp();
-      while (logOp != null) {
-        out.write(logOp);
-        logOp = in.nextOp();
-      }
-      out.setReadyToFlush();
-      out.flushAndSync(true);
-      out.close();
-      in.close();
+      Files.createLink(newFile.toPath(), prevFile.toPath());
     }
   }
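For reference, Files.createLink(newFile, prevFile) gives the log a second directory entry: curDir/<name> and tmpDir/<name> now name the same inode, so the preserved edits survive whichever directory is discarded later. A small self-contained demo of that semantics (temp-file names hypothetical):

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;

public class HardLinkDemo {
  public static void main(String[] args) throws IOException {
    Path dir = Files.createTempDirectory("nnupgrade");
    Path prev = dir.resolve("edits.prev");   // stands in for tmpDir/<log>
    Files.write(prev, "edit bytes".getBytes(StandardCharsets.UTF_8));

    // The new doPreUpgrade step: link the new name to the existing file.
    Path cur = dir.resolve("edits.cur");     // stands in for curDir/<log>
    Files.createLink(cur, prev);

    // Both names read the same data; nothing was copied.
    System.out.println(Files.readAllLines(cur, StandardCharsets.UTF_8).get(0));

    // Deleting one name leaves the data reachable through the other, so the
    // tmp directory can be cleaned up later without losing the edit log.
    Files.delete(prev);
    System.out.println(Files.exists(cur));   // prints: true
  }
}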

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java

@@ -30,13 +30,16 @@ import static org.junit.Assert.fail;
 import java.io.File;
 import java.io.FilenameFilter;
 import java.io.IOException;
-import java.util.LinkedList;
+import java.nio.file.Files;
 import java.util.List;
 import java.util.regex.Pattern;
 
+import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.inotify.Event;
+import org.apache.hadoop.hdfs.inotify.EventBatch;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -45,7 +48,11 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
+import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.namenode.TestParallelImageWrite;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
@@ -54,6 +61,8 @@ import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
 
+import static org.apache.hadoop.hdfs.inotify.Event.CreateEvent;
+
 import com.google.common.base.Charsets;
 import com.google.common.base.Joiner;
@@ -466,31 +475,50 @@ public class TestDFSUpgrade {
     log("Normal NameNode upgrade", 1);
     File[] created =
       UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
-    List<String> beforeUpgrade = new LinkedList<>();
     for (final File createdDir : created) {
       List<String> fileNameList =
           IOUtils.listDirectory(createdDir, EditLogsFilter.INSTANCE);
-      beforeUpgrade.addAll(fileNameList);
+      for (String fileName : fileNameList) {
+        String tmpFileName = fileName + ".tmp";
+        File existingFile = new File(createdDir, fileName);
+        File tmpFile = new File(createdDir, tmpFileName);
+        Files.move(existingFile.toPath(), tmpFile.toPath());
+        File newFile = new File(createdDir, fileName);
+        Preconditions.checkState(newFile.createNewFile(),
+            "Cannot create new edits log file in " + createdDir);
+        EditLogFileInputStream in = new EditLogFileInputStream(tmpFile,
+            HdfsServerConstants.INVALID_TXID, HdfsServerConstants.INVALID_TXID,
+            false);
+        EditLogFileOutputStream out = new EditLogFileOutputStream(conf, newFile,
+            (int)tmpFile.length());
+        out.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION + 1);
+        FSEditLogOp logOp = in.readOp();
+        while (logOp != null) {
+          out.write(logOp);
+          logOp = in.readOp();
+        }
+        out.setReadyToFlush();
+        out.flushAndSync(true);
+        out.close();
+        Files.delete(tmpFile.toPath());
+      }
     }
 
     cluster = createCluster();
-    List<String> afterUpgrade = new LinkedList<>();
-    for (final File createdDir : created) {
-      List<String> fileNameList =
-          IOUtils.listDirectory(createdDir, EditLogsFilter.INSTANCE);
-      afterUpgrade.addAll(fileNameList);
-    }
-
-    for (String s : beforeUpgrade) {
-      assertTrue(afterUpgrade.contains(s));
-    }
+    DFSInotifyEventInputStream ieis =
+        cluster.getFileSystem().getInotifyEventStream(0);
+    EventBatch batch = ieis.poll();
+    Event[] events = batch.getEvents();
+    assertTrue("Should be able to get transactions before the upgrade.",
+        events.length > 0);
+    assertEquals(events[0].getEventType(), Event.EventType.CREATE);
+    assertEquals(((CreateEvent) events[0]).getPath(), "/TestUpgrade");
 
     cluster.shutdown();
     UpgradeUtilities.createEmptyDirs(nameNodeDirs);
   }
 
-  private static enum EditLogsFilter implements FilenameFilter {
+  private enum EditLogsFilter implements FilenameFilter {
     INSTANCE;
 
     @Override
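The reworked test no longer just compares file-name lists before and after the upgrade. It first rewrites each pre-existing edit log through EditLogFileOutputStream under layout version CURRENT_LAYOUT_VERSION + 1 (HDFS layout versions are negative, so adding one yields an older layout, simulating logs left behind by a previous release), then after the upgrade opens an inotify stream at transaction 0 and asserts that the old transactions are still readable. A hedged sketch of that read-back pattern as a standalone helper (the drain loop is illustrative, not the committed test code):

import java.io.IOException;

import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.inotify.EventBatch;
import org.apache.hadoop.hdfs.inotify.MissingEventsException;

public class InotifyReadBack {
  // Reads every event batch currently available from the edit log, starting
  // at transaction id 0 so that pre-upgrade transactions are included.
  static void drain(DistributedFileSystem fs)
      throws IOException, MissingEventsException {
    DFSInotifyEventInputStream ieis = fs.getInotifyEventStream(0);
    EventBatch batch;
    while ((batch = ieis.poll()) != null) {  // null: nothing more buffered
      for (Event e : batch.getEvents()) {
        if (e.getEventType() == Event.EventType.CREATE) {
          System.out.println(((Event.CreateEvent) e).getPath());
        }
      }
    }
  }
}

Called with the upgraded cluster's DistributedFileSystem, this would print the path of every CREATE recorded before the upgrade, "/TestUpgrade" among them, confirming that the hard-linked logs remained intact.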