HDFS-9134. Move LEASE_{SOFTLIMIT,HARDLIMIT}_PERIOD constants from HdfsServerConstants to HdfsConstants. Contributed by Mingliang Liu.

Author: Haohui Mai
Date:   2015-09-24 20:19:16 -07:00
parent d1b9b85244
commit 692b1a45ce

12 changed files with 58 additions and 44 deletions

HdfsConstants.java

@@ -93,6 +93,29 @@ public final class HdfsConstants {
   //for write pipeline
   public static final int WRITE_TIMEOUT_EXTENSION = 5 * 1000;
+  /**
+   * For a HDFS client to write to a file, a lease is granted; During the lease
+   * period, no other client can write to the file. The writing client can
+   * periodically renew the lease. When the file is closed, the lease is
+   * revoked. The lease duration is bound by this soft limit and a
+   * {@link HdfsConstants#LEASE_HARDLIMIT_PERIOD hard limit}. Until the
+   * soft limit expires, the writer has sole write access to the file. If the
+   * soft limit expires and the client fails to close the file or renew the
+   * lease, another client can preempt the lease.
+   */
+  public static final long LEASE_SOFTLIMIT_PERIOD = 60 * 1000;
+  /**
+   * For a HDFS client to write to a file, a lease is granted; During the lease
+   * period, no other client can write to the file. The writing client can
+   * periodically renew the lease. When the file is closed, the lease is
+   * revoked. The lease duration is bound by a
+   * {@link HdfsConstants#LEASE_SOFTLIMIT_PERIOD soft limit} and this hard
+   * limit. If after the hard limit expires and the client has failed to renew
+   * the lease, HDFS assumes that the client has quit and will automatically
+   * close the file on behalf of the writer, and recover the lease.
+   */
+  public static final long LEASE_HARDLIMIT_PERIOD = 60 * LEASE_SOFTLIMIT_PERIOD;
   // SafeMode actions
   public enum SafeModeAction {
     SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET
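
The two limits work on very different scales: the soft limit is 60 * 1000 ms (one minute) and the hard limit is 60 times that (one hour). A minimal sketch of how a caller might classify lease state against the relocated constants (the helper class and its names are hypothetical, not part of this change):

import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class LeaseExpiryCheck {
  // Hypothetical helper: classify lease state from the time elapsed
  // since the last successful renewal, in milliseconds.
  static String classify(long elapsedMillis) {
    if (elapsedMillis > HdfsConstants.LEASE_HARDLIMIT_PERIOD) {
      // Past the one-hour hard limit: HDFS may close the file on the
      // writer's behalf and recover the lease.
      return "hard limit expired";
    } else if (elapsedMillis > HdfsConstants.LEASE_SOFTLIMIT_PERIOD) {
      // Past the one-minute soft limit: another client may preempt the lease.
      return "soft limit expired";
    }
    return "valid";
  }

  public static void main(String[] args) {
    System.out.println(classify(30 * 1000L));       // valid
    System.out.println(classify(5 * 60 * 1000L));   // soft limit expired
    System.out.println(classify(61 * 60 * 1000L));  // hard limit expired
  }
}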

CHANGES.txt

@@ -959,6 +959,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-7529. Consolidate encryption zone related implementation into a single
     class. (Rakesh R via wheat9)
 
+    HDFS-9134. Move LEASE_{SOFTLIMIT,HARDLIMIT}_PERIOD constants from
+    HdfsServerConstants to HdfsConstants. (Mingliang Liu via wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

DFSClient.java

@@ -149,7 +149,6 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
@@ -541,10 +540,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
       } catch (IOException e) {
         // Abort if the lease has already expired.
         final long elapsed = Time.monotonicNow() - getLastLeaseRenewal();
-        if (elapsed > HdfsServerConstants.LEASE_HARDLIMIT_PERIOD) {
+        if (elapsed > HdfsConstants.LEASE_HARDLIMIT_PERIOD) {
           LOG.warn("Failed to renew lease for " + clientName + " for "
               + (elapsed/1000) + " seconds (>= hard-limit ="
-              + (HdfsServerConstants.LEASE_HARDLIMIT_PERIOD/1000) + " seconds.) "
+              + (HdfsConstants.LEASE_HARDLIMIT_PERIOD / 1000) + " seconds.) "
               + "Closing all files being written ...", e);
           closeAllFilesBeingWritten(true);
         } else {

LeaseRenewer.java

@@ -33,7 +33,7 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSOutputStream;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.StringUtils;
@@ -165,7 +165,7 @@ public class LeaseRenewer {
   /** The time in milliseconds that the map became empty. */
   private long emptyTime = Long.MAX_VALUE;
   /** A fixed lease renewal time period in milliseconds */
-  private long renewal = HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD/2;
+  private long renewal = HdfsConstants.LEASE_SOFTLIMIT_PERIOD / 2;
   /** A daemon for renewing lease */
   private Daemon daemon = null;
@@ -378,7 +378,7 @@ public class LeaseRenewer {
       //update renewal time
       if (renewal == dfsc.getConf().getHdfsTimeout()/2) {
-        long min = HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD;
+        long min = HdfsConstants.LEASE_SOFTLIMIT_PERIOD;
         for(DFSClient c : dfsclients) {
           final int timeout = c.getConf().getHdfsTimeout();
           if (timeout > 0 && timeout < min) {
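
The default renewal period is half the soft limit (30 seconds), so a healthy client renews its lease well before the soft limit can lapse; when clients set a positive hdfsTimeout smaller than the soft limit, the renewer shrinks the period further. A sketch of that selection logic as a standalone method (hypothetical restatement of the loop above, not the LeaseRenewer source):

import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class RenewalPeriod {
  // Start from the soft limit, shrink to the smallest positive client
  // timeout, and renew at half the resulting period.
  static long renewalMillis(int[] clientTimeoutsMillis) {
    long min = HdfsConstants.LEASE_SOFTLIMIT_PERIOD;
    for (int timeout : clientTimeoutsMillis) {
      if (timeout > 0 && timeout < min) {
        min = timeout;
      }
    }
    return min / 2;
  }

  public static void main(String[] args) {
    System.out.println(renewalMillis(new int[] {}));           // 30000
    System.out.println(renewalMillis(new int[] {20000, -1}));  // 10000
  }
}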

HdfsServerConstants.java

@@ -25,6 +25,7 @@ import java.util.regex.Pattern;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
@@ -42,28 +43,14 @@ import org.apache.hadoop.util.StringUtils;
 @InterfaceAudience.Private
 public interface HdfsServerConstants {
   int MIN_BLOCKS_FOR_WRITE = 1;
   /**
-   * For a HDFS client to write to a file, a lease is granted; During the lease
-   * period, no other client can write to the file. The writing client can
-   * periodically renew the lease. When the file is closed, the lease is
-   * revoked. The lease duration is bound by this soft limit and a
-   * {@link HdfsServerConstants#LEASE_HARDLIMIT_PERIOD hard limit}. Until the
-   * soft limit expires, the writer has sole write access to the file. If the
-   * soft limit expires and the client fails to close the file or renew the
-   * lease, another client can preempt the lease.
+   * Please see {@link HdfsConstants#LEASE_SOFTLIMIT_PERIOD} and
+   * {@link HdfsConstants#LEASE_HARDLIMIT_PERIOD} for more information.
    */
-  long LEASE_SOFTLIMIT_PERIOD = 60 * 1000;
-  /**
-   * For a HDFS client to write to a file, a lease is granted; During the lease
-   * period, no other client can write to the file. The writing client can
-   * periodically renew the lease. When the file is closed, the lease is
-   * revoked. The lease duration is bound by a
-   * {@link HdfsServerConstants#LEASE_SOFTLIMIT_PERIOD soft limit} and this hard
-   * limit. If after the hard limit expires and the client has failed to renew
-   * the lease, HDFS assumes that the client has quit and will automatically
-   * close the file on behalf of the writer, and recover the lease.
-   */
-  long LEASE_HARDLIMIT_PERIOD = 60 * LEASE_SOFTLIMIT_PERIOD;
+  long LEASE_SOFTLIMIT_PERIOD = HdfsConstants.LEASE_SOFTLIMIT_PERIOD;
+  long LEASE_HARDLIMIT_PERIOD = HdfsConstants.LEASE_HARDLIMIT_PERIOD;
   long LEASE_RECOVER_PERIOD = 10 * 1000; // in ms
   // We need to limit the length and depth of a path in the filesystem.
   // HADOOP-438
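
Because the old names are kept as aliases that delegate to HdfsConstants, server-side code compiled against HdfsServerConstants keeps working and sees identical values. A quick compatibility check (hypothetical test class, assuming both classes are on the classpath):

import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

public class LeaseConstantsCompat {
  public static void main(String[] args) {
    // Both names must resolve to the same values after this change.
    assert HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD
        == HdfsConstants.LEASE_SOFTLIMIT_PERIOD;
    assert HdfsServerConstants.LEASE_HARDLIMIT_PERIOD
        == HdfsConstants.LEASE_HARDLIMIT_PERIOD;
    System.out.println("lease constant aliases match");
  }
}

(Run with java -ea to enable the assertions.)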

BackupNode.java

@@ -29,6 +29,7 @@ import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.NameNodeProxies;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalProtocolService;
@@ -156,7 +157,7 @@ public class BackupNode extends NameNode {
     // Backup node should never do lease recovery,
     // therefore lease hard limit should never expire.
     namesystem.leaseManager.setLeasePeriod(
-        HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD, Long.MAX_VALUE);
+        HdfsConstants.LEASE_SOFTLIMIT_PERIOD, Long.MAX_VALUE);
     // register with the active name-node
     registerWith(nsInfo);

LeaseManager.java

@@ -34,6 +34,7 @@ import java.util.TreeMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.util.Daemon;
@@ -69,8 +70,8 @@ public class LeaseManager {
   private final FSNamesystem fsnamesystem;
-  private long softLimit = HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD;
-  private long hardLimit = HdfsServerConstants.LEASE_HARDLIMIT_PERIOD;
+  private long softLimit = HdfsConstants.LEASE_SOFTLIMIT_PERIOD;
+  private long hardLimit = HdfsConstants.LEASE_HARDLIMIT_PERIOD;
   //
   // Used for handling lock-leases

TestFileAppend4.java

@@ -40,8 +40,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
@@ -111,7 +111,7 @@ public class TestFileAppend4 {
     // set the soft limit to be 1 second so that the
     // namenode triggers lease recovery upon append request
-    cluster.setLeasePeriod(1000, HdfsServerConstants.LEASE_HARDLIMIT_PERIOD);
+    cluster.setLeasePeriod(1000, HdfsConstants.LEASE_HARDLIMIT_PERIOD);
     // Trying recovery
     int tries = 60;

TestLease.java

@@ -43,8 +43,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.impl.LeaseRenewer;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.EnumSetWritable;
@@ -99,8 +99,8 @@ public class TestLease {
     // call renewLease() manually.
     // make it look like the soft limit has been exceeded.
     LeaseRenewer originalRenewer = dfs.getLeaseRenewer();
-    dfs.lastLeaseRenewal = Time.monotonicNow()
-    - HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD - 1000;
+    dfs.lastLeaseRenewal = Time.monotonicNow() -
+      HdfsConstants.LEASE_SOFTLIMIT_PERIOD - 1000;
     try {
       dfs.renewLease();
     } catch (IOException e) {}
@@ -116,7 +116,7 @@ public class TestLease {
     // make it look like the hard limit has been exceeded.
     dfs.lastLeaseRenewal = Time.monotonicNow()
-        - HdfsServerConstants.LEASE_HARDLIMIT_PERIOD - 1000;
+        - HdfsConstants.LEASE_HARDLIMIT_PERIOD - 1000;
     dfs.renewLease();
     // this should not work.
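
Rather than waiting a minute (or an hour) for a limit to lapse, the test back-dates the client's last renewal timestamp so the elapsed time appears to exceed the limit. The same trick in isolation (a sketch using a local variable in place of DFSClient.lastLeaseRenewal):

import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.util.Time;

public class BackdateLease {
  public static void main(String[] args) {
    // Pretend the last renewal happened just beyond the soft limit.
    long lastLeaseRenewal =
        Time.monotonicNow() - HdfsConstants.LEASE_SOFTLIMIT_PERIOD - 1000;
    long elapsed = Time.monotonicNow() - lastLeaseRenewal;
    System.out.println(elapsed > HdfsConstants.LEASE_SOFTLIMIT_PERIOD); // true
  }
}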

TestLeaseRecovery2.java

@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -332,8 +333,8 @@ public class TestLeaseRecovery2 {
     DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map);
     // Reset default lease periods
-    cluster.setLeasePeriod(HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD,
-                           HdfsServerConstants.LEASE_HARDLIMIT_PERIOD);
+    cluster.setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD,
+                           HdfsConstants.LEASE_HARDLIMIT_PERIOD);
     //create a file
     // create a random file name
     String filestr = "/foo" + AppendTestUtil.nextInt();

TestFileTruncate.java

@@ -52,6 +52,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -649,8 +650,8 @@ public class TestFileTruncate {
     checkBlockRecovery(p);
     NameNodeAdapter.getLeaseManager(cluster.getNamesystem())
-        .setLeasePeriod(HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD,
-            HdfsServerConstants.LEASE_HARDLIMIT_PERIOD);
+        .setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD,
+            HdfsConstants.LEASE_HARDLIMIT_PERIOD);
     checkFullFile(p, newLength, contents);
     fs.delete(p, false);

TestINodeFileUnderConstructionWithSnapshot.java

@@ -37,9 +37,9 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
@@ -302,10 +302,8 @@ public class TestINodeFileUnderConstructionWithSnapshot {
         fsn.writeUnlock();
       }
     } finally {
-      NameNodeAdapter.setLeasePeriod(
-          fsn,
-          HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD,
-          HdfsServerConstants.LEASE_HARDLIMIT_PERIOD);
+      NameNodeAdapter.setLeasePeriod(fsn, HdfsConstants.LEASE_SOFTLIMIT_PERIOD,
+          HdfsConstants.LEASE_HARDLIMIT_PERIOD);
     }
   }
 }