svn merge -c 1585544 Merging from trunk to branch-2 to fix: HDFS-6191. Disable quota checks when replaying edit log. Contributed by Kihwal Lee.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1585545 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in: parent d7d7f090cb, commit fef85fad1a
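The patch itself is small: FSDirectory gains a volatile skipQuotaCheck flag with enableQuotaChecks()/disableQuotaChecks() toggles, the quota verification paths honor that flag, and the BackupNode, the standby FSNamesystem, and the SecondaryNameNode turn checks off while they are only replaying edits. The stand-alone sketch below illustrates that pattern; the class and method names (QuotaSkipSketch, Dir, addName) are made up for illustration and are not HDFS APIs.

// Minimal, self-contained sketch of the pattern this patch applies (not the
// actual FSDirectory code): a volatile flag lets quota verification be turned
// off while edits are being replayed and turned back on afterwards.
public class QuotaSkipSketch {

  static class QuotaExceededException extends Exception {
    QuotaExceededException(String msg) { super(msg); }
  }

  static class Dir {
    private final long nsQuota;   // max number of names allowed
    private long nsCount;         // names currently used
    // Volatile so the toggle is visible to other threads without extra locking.
    private volatile boolean skipQuotaCheck = false;

    Dir(long nsQuota) { this.nsQuota = nsQuota; }

    void enableQuotaChecks()  { skipQuotaCheck = false; }
    void disableQuotaChecks() { skipQuotaCheck = true; }

    // Mirrors the patched condition: the check runs only when it is both
    // requested and not globally skipped (e.g. while consuming edits).
    void addName(boolean checkQuota) throws QuotaExceededException {
      if (checkQuota && !skipQuotaCheck) {
        if (nsCount + 1 > nsQuota) {
          throw new QuotaExceededException("namespace quota of " + nsQuota + " exceeded");
        }
      }
      nsCount++;
    }
  }

  public static void main(String[] args) throws Exception {
    Dir dir = new Dir(1);
    dir.addName(true);           // first name fits the quota

    dir.disableQuotaChecks();    // e.g. a standby replaying the edit log
    dir.addName(true);           // accepted even though the quota is exceeded

    dir.enableQuotaChecks();     // e.g. transitioning to active
    try {
      dir.addName(true);         // now rejected again
    } catch (QuotaExceededException expected) {
      System.out.println("rejected as expected: " + expected.getMessage());
    }
  }
}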
CHANGES.txt
@@ -35,6 +35,8 @@ Release 2.5.0 - UNRELEASED
     HDFS-6167. Relocate the non-public API classes in the hdfs.client package.
     (szetszwo)
 
+    HDFS-6191. Disable quota checks when replaying edit log. (kihwal)
+
   OPTIMIZATIONS
 
   BUG FIXES
BackupNode.java
@@ -134,6 +134,7 @@ public class BackupNode extends NameNode {
         BN_SAFEMODE_EXTENSION_DEFAULT);
     BackupImage bnImage = new BackupImage(conf);
     this.namesystem = new FSNamesystem(conf, bnImage);
+    namesystem.dir.disableQuotaChecks();
     bnImage.setNamesystem(namesystem);
     bnImage.recoverCreateRead();
   }
FSDirectory.java
@@ -117,6 +117,7 @@ public class FSDirectory implements Closeable {
   FSImage fsImage;
   private final FSNamesystem namesystem;
   private volatile boolean ready = false;
+  private volatile boolean skipQuotaCheck = false; //skip while consuming edits
   private final int maxComponentLength;
   private final int maxDirItems;
   private final int lsLimit; // max list limit

@@ -283,6 +284,16 @@ public class FSDirectory implements Closeable {
     }
   }
 
+  /** Enable quota verification */
+  void enableQuotaChecks() {
+    skipQuotaCheck = false;
+  }
+
+  /** Disable quota verification */
+  void disableQuotaChecks() {
+    skipQuotaCheck = true;
+  }
+
   /**
    * Add the given filename to the fs.
    * @throws FileAlreadyExistsException

@@ -1825,7 +1836,7 @@ public class FSDirectory implements Closeable {
     if (numOfINodes > inodes.length) {
      numOfINodes = inodes.length;
     }
-    if (checkQuota) {
+    if (checkQuota && !skipQuotaCheck) {
       verifyQuota(inodes, numOfINodes, nsDelta, dsDelta, null);
     }
     unprotectedUpdateCount(iip, numOfINodes, nsDelta, dsDelta);

@@ -2117,7 +2128,7 @@ public class FSDirectory implements Closeable {
    */
   private void verifyQuotaForRename(INode[] src, INode[] dst)
       throws QuotaExceededException {
-    if (!ready) {
+    if (!ready || skipQuotaCheck) {
       // Do not check quota if edits log is still being processed
       return;
     }
FSNamesystem.java
@@ -1019,7 +1019,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
 
         dir.fsImage.editLog.openForWrite();
       }
 
+      // Enable quota checks.
+      dir.enableQuotaChecks();
       if (haEnabled) {
         // Renew all of the leases before becoming active.
         // This is because, while we were in standby mode,

@@ -1126,6 +1128,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
 
     blockManager.setPostponeBlocksFromFuture(true);
 
+    // Disable quota checks while in standby.
+    dir.disableQuotaChecks();
     editLogTailer = new EditLogTailer(this, conf);
     editLogTailer.start();
     if (standbyShouldCheckpoint) {
SecondaryNameNode.java
@@ -248,6 +248,9 @@ public class SecondaryNameNode implements Runnable {
 
     namesystem = new FSNamesystem(conf, checkpointImage, true);
+
+    // Disable quota checks
+    namesystem.dir.disableQuotaChecks();
 
     // Initialize other scheduling parameters from the configuration
     checkpointConf = new CheckpointConf(conf);
 

@@ -850,7 +853,7 @@ public class SecondaryNameNode implements Runnable {
                       Collection<URI> imageDirs,
                       List<URI> editsDirs) throws IOException {
       super(conf, imageDirs, editsDirs);
 
 
       // the 2NN never writes edits -- it only downloads them. So
       // we shouldn't have any editLog instance. Setting to null
       // makes sure we don't accidentally depend on it.
TestFSDirectory.java
@@ -20,12 +20,15 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 
 import java.io.BufferedReader;
+import java.io.IOException;
 import java.io.StringReader;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;

@@ -55,6 +58,7 @@ public class TestFSDirectory {
   private final Path file5 = new Path(sub1, "z_file5");
 
   private final Path sub2 = new Path(dir, "sub2");
+  private final Path file6 = new Path(sub2, "file6");
 
   private Configuration conf;
   private MiniDFSCluster cluster;

@@ -125,6 +129,41 @@ public class TestFSDirectory {
     fsdir.imageLoadComplete();
     Assert.assertTrue(fsdir.isReady());
   }
+
+  @Test
+  public void testSkipQuotaCheck() throws Exception {
+    try {
+      // set quota. nsQuota of 1 means no files can be created
+      // under this directory.
+      hdfs.setQuota(sub2, 1, Long.MAX_VALUE);
+
+      // create a file
+      try {
+        // this should fail
+        DFSTestUtil.createFile(hdfs, file6, 1024, REPLICATION, seed);
+        throw new IOException("The create should have failed.");
+      } catch (NSQuotaExceededException qe) {
+        // ignored
+      }
+      // disable the quota check and retry. this should succeed.
+      fsdir.disableQuotaChecks();
+      DFSTestUtil.createFile(hdfs, file6, 1024, REPLICATION, seed);
+
+      // trying again after re-enabling the check.
+      hdfs.delete(file6, false); // cleanup
+      fsdir.enableQuotaChecks();
+      try {
+        // this should fail
+        DFSTestUtil.createFile(hdfs, file6, 1024, REPLICATION, seed);
+        throw new IOException("The create should have failed.");
+      } catch (NSQuotaExceededException qe) {
+        // ignored
+      }
+    } finally {
+      hdfs.delete(file6, false); // cleanup, in case the test failed in the middle.
+      hdfs.setQuota(sub2, Long.MAX_VALUE, Long.MAX_VALUE);
+    }
+  }
 
   static void checkClassName(String line) {
     int i = line.lastIndexOf('(');