Merge r1555021 through r1556550 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-5535@1556552 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2014-01-08 14:36:09 +00:00
commit f3ee35ab28
125 changed files with 3363 additions and 1689 deletions


@@ -92,6 +92,11 @@
     <artifactId>hadoop-minikdc</artifactId>
     <scope>test</scope>
   </dependency>
+  <dependency>
+    <groupId>org.apache.httpcomponents</groupId>
+    <artifactId>httpclient</artifactId>
+    <scope>compile</scope>
+  </dependency>
 </dependencies>
 <build>


@@ -16,10 +16,15 @@ package org.apache.hadoop.security.authentication.server;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;
+import org.apache.http.client.utils.URLEncodedUtils;
+import org.apache.http.NameValuePair;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 import java.io.IOException;
+import java.nio.charset.Charset;
+import java.util.List;
 import java.util.Properties;

 /**
@@ -48,6 +53,7 @@ public class PseudoAuthenticationHandler implements AuthenticationHandler {
    */
   public static final String ANONYMOUS_ALLOWED = TYPE + ".anonymous.allowed";
+  private static final Charset UTF8_CHARSET = Charset.forName("UTF-8");
   private boolean acceptAnonymous;

 /**
@@ -114,6 +120,18 @@ public class PseudoAuthenticationHandler implements AuthenticationHandler {
     return true;
   }

+  private String getUserName(HttpServletRequest request) {
+    List<NameValuePair> list = URLEncodedUtils.parse(request.getQueryString(), UTF8_CHARSET);
+    if (list != null) {
+      for (NameValuePair nv : list) {
+        if (PseudoAuthenticator.USER_NAME.equals(nv.getName())) {
+          return nv.getValue();
+        }
+      }
+    }
+    return null;
+  }
+
   /**
    * Authenticates an HTTP client request.
    * <p/>
@@ -139,7 +157,7 @@ public class PseudoAuthenticationHandler implements AuthenticationHandler {
   public AuthenticationToken authenticate(HttpServletRequest request, HttpServletResponse response)
       throws IOException, AuthenticationException {
     AuthenticationToken token;
-    String userName = request.getParameter(PseudoAuthenticator.USER_NAME);
+    String userName = getUserName(request);
     if (userName == null) {
       if (getAcceptAnonymous()) {
         token = AuthenticationToken.ANONYMOUS;
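The change above is the heart of HADOOP-10193: on a form-encoded POST, ServletRequest.getParameter() can force the container to read and parse the request body, so a later getInputStream() hands back an already-consumed stream. Reading only the query string leaves the body alone. A minimal standalone sketch of the same technique, using the httpclient dependency added above (the class name and query values here are illustrative, not part of the patch):

import java.nio.charset.Charset;
import java.util.List;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;

public class QueryStringUserName {
  // Pull "user.name" out of a raw query string without touching the body.
  static String userName(String queryString) {
    if (queryString == null) {
      return null;
    }
    List<NameValuePair> pairs =
        URLEncodedUtils.parse(queryString, Charset.forName("UTF-8"));
    for (NameValuePair nv : pairs) {
      if ("user.name".equals(nv.getName())) {
        return nv.getValue();
      }
    }
    return null;
  }

  public static void main(String[] args) {
    System.out.println(userName("user.name=alice&op=open")); // prints alice
  }
}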


@@ -94,7 +94,7 @@ public class TestPseudoAuthenticationHandler {
     HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
     HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
-    Mockito.when(request.getParameter(PseudoAuthenticator.USER_NAME)).thenReturn("user");
+    Mockito.when(request.getQueryString()).thenReturn(PseudoAuthenticator.USER_NAME + "=" + "user");
     AuthenticationToken token = handler.authenticate(request, response);


@@ -108,6 +108,8 @@ Trunk (Unreleased)
    HADOOP-10141. Create KeyProvider API to separate encryption key storage
    from the applications. (omalley)

+   HADOOP-10201. Add listing to KeyProvider API. (Larry McCay via omalley)
+
  BUG FIXES

    HADOOP-9451. Fault single-layer config if node group topology is enabled.

@@ -580,6 +582,9 @@ Release 2.3.0 - UNRELEASED
    HADOOP-10090. Jobtracker metrics not updated properly after execution
    of a mapreduce job. (ivanmi)

+   HADOOP-10193. hadoop-auth's PseudoAuthenticationHandler can consume getInputStream.
+   (gchanan via tucu)
+
Release 2.2.0 - 2013-10-13

  INCOMPATIBLE CHANGES


@@ -36,8 +36,11 @@ import java.security.KeyStoreException;
 import java.security.NoSuchAlgorithmException;
 import java.security.UnrecoverableKeyException;
 import java.security.cert.CertificateException;
+import java.util.ArrayList;
 import java.util.Date;
+import java.util.Enumeration;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;

 /**
@@ -56,6 +59,7 @@ import java.util.Map;
  */
 @InterfaceAudience.Private
 public class JavaKeyStoreProvider extends KeyProvider {
+  private static final String KEY_METADATA = "KeyMetadata";
   public static final String SCHEME_NAME = "jceks";
   public static final String KEYSTORE_PASSWORD_NAME =
       "HADOOP_KEYSTORE_PASSWORD";
@@ -117,6 +121,44 @@ public class JavaKeyStoreProvider extends KeyProvider {
     return new KeyVersion(versionName, key.getEncoded());
   }

+  @Override
+  public List<String> getKeys() throws IOException {
+    ArrayList<String> list = new ArrayList<String>();
+    String alias = null;
+    try {
+      Enumeration<String> e = keyStore.aliases();
+      while (e.hasMoreElements()) {
+        alias = e.nextElement();
+        // only include the metadata key names in the list of names
+        if (!alias.contains("@")) {
+          list.add(alias);
+        }
+      }
+    } catch (KeyStoreException e) {
+      throw new IOException("Can't get key " + alias + " from " + path, e);
+    }
+    return list;
+  }
+
+  @Override
+  public List<KeyVersion> getKeyVersions(String name) throws IOException {
+    List<KeyVersion> list = new ArrayList<KeyVersion>();
+    Metadata km = getMetadata(name);
+    if (km != null) {
+      int latestVersion = km.getVersions();
+      KeyVersion v = null;
+      String versionName = null;
+      for (int i = 0; i < latestVersion; i++) {
+        versionName = buildVersionName(name, i);
+        v = getKeyVersion(versionName);
+        if (v != null) {
+          list.add(v);
+        }
+      }
+    }
+    return list;
+  }
+
   @Override
   public Metadata getMetadata(String name) throws IOException {
     if (cache.containsKey(name)) {
@@ -288,7 +330,7 @@ public class JavaKeyStoreProvider extends KeyProvider {
     @Override
     public String getFormat() {
-      return "KeyMetadata";
+      return KEY_METADATA;
     }

     @Override


@@ -254,6 +254,20 @@ public abstract class KeyProvider {
   public abstract KeyVersion getKeyVersion(String versionName
                                            ) throws IOException;

+  /**
+   * Get the key names for all keys.
+   * @return the list of key names
+   * @throws IOException
+   */
+  public abstract List<String> getKeys() throws IOException;
+
+  /**
+   * Get the key material for all versions of a specific key name.
+   * @return the list of key material
+   * @throws IOException
+   */
+  public abstract List<KeyVersion> getKeyVersions(String name) throws IOException;
+
   /**
    * Get the current version of the key, which should be used for encrypting new
    * data.
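A hedged usage sketch of the two new abstract methods (assumes an already-constructed KeyProvider instance; the class name is made up for illustration):

import java.io.IOException;
import org.apache.hadoop.crypto.key.KeyProvider;

public class ListAllKeys {
  // Walk every key name, then every version recorded for that key.
  static void dump(KeyProvider provider) throws IOException {
    for (String name : provider.getKeys()) {
      for (KeyProvider.KeyVersion kv : provider.getKeyVersions(name)) {
        System.out.println(kv.getVersionName()
            + " (" + kv.getMaterial().length + " bytes)");
      }
    }
  }
}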


@@ -20,8 +20,10 @@ package org.apache.hadoop.crypto.key;

 import java.io.IOException;
 import java.net.URI;
+import java.util.ArrayList;
 import java.util.Date;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;

 import org.apache.hadoop.classification.InterfaceAudience;
@@ -142,4 +144,32 @@ public class UserProvider extends KeyProvider {
       return null;
     }
   }
+
+  @Override
+  public List<String> getKeys() throws IOException {
+    List<String> list = new ArrayList<String>();
+    List<Text> keys = credentials.getAllSecretKeys();
+    for (Text key : keys) {
+      if (key.find("@") == -1) {
+        list.add(key.toString());
+      }
+    }
+    return list;
+  }
+
+  @Override
+  public List<KeyVersion> getKeyVersions(String name) throws IOException {
+    List<KeyVersion> list = new ArrayList<KeyVersion>();
+    Metadata km = getMetadata(name);
+    if (km != null) {
+      int latestVersion = km.getVersions();
+      for (int i = 0; i < latestVersion; i++) {
+        KeyVersion v = getKeyVersion(buildVersionName(name, i));
+        if (v != null) {
+          list.add(v);
+        }
+      }
+    }
+    return list;
+  }
 }


@@ -21,7 +21,6 @@ package org.apache.hadoop.fs;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.http.lib.StaticUserWebFilter;
-import org.apache.hadoop.security.authorize.Service;

 /**
  * This class contains constants for configuration keys used
@@ -240,4 +239,7 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   /** Default value for IPC_SERVER_CONNECTION_IDLE_SCAN_INTERVAL_KEY */
   public static final int IPC_CLIENT_CONNECTION_IDLESCANINTERVAL_DEFAULT =
       10000;
+
+  public static final String HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS =
+      "hadoop.user.group.metrics.percentiles.intervals";
 }


@@ -766,8 +766,9 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
     zkClient = getNewZooKeeper();
     LOG.debug("Created new connection for " + this);
   }

-  void terminateConnection() {
+  @InterfaceAudience.Private
+  public void terminateConnection() {
     if (zkClient == null) {
       return;
     }


@@ -29,7 +29,9 @@ import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -73,15 +75,6 @@ public class Credentials implements Writable {
     this.addAll(credentials);
   }

-  /**
-   * Returns the key bytes for the alias
-   * @param alias the alias for the key
-   * @return key for this alias
-   */
-  public byte[] getSecretKey(Text alias) {
-    return secretKeysMap.get(alias);
-  }
-
   /**
    * Returns the Token object for the alias
    * @param alias the alias for the Token
@@ -117,6 +110,15 @@ public class Credentials implements Writable {
   public int numberOfTokens() {
     return tokenMap.size();
   }

+  /**
+   * Returns the key bytes for the alias
+   * @param alias the alias for the key
+   * @return key for this alias
+   */
+  public byte[] getSecretKey(Text alias) {
+    return secretKeysMap.get(alias);
+  }
+
   /**
    * @return number of keys in the in-memory map

@@ -142,6 +144,16 @@ public class Credentials implements Writable {
     secretKeysMap.remove(alias);
   }

+  /**
+   * Return all the secret key entries in the in-memory map
+   */
+  public List<Text> getAllSecretKeys() {
+    List<Text> list = new java.util.ArrayList<Text>();
+    list.addAll(secretKeysMap.keySet());
+    return list;
+  }
+
   /**
    * Convenience method for reading a token storage file, and loading the Tokens
    * therein in the passed UGI
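A small hedged sketch of the relocated and added accessors (alias and key material are made up for illustration):

import java.util.List;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;

public class SecretKeyAliases {
  public static void main(String[] args) {
    Credentials creds = new Credentials();
    creds.addSecretKey(new Text("mykey"), new byte[] {1, 2, 3});
    // New in this patch: list every secret-key alias in the in-memory map.
    List<Text> aliases = creds.getAllSecretKeys();
    for (Text alias : aliases) {
      System.out.println(alias + " -> "
          + creds.getSecretKey(alias).length + " bytes");
    }
  }
}

UserProvider.getKeys() earlier in this diff leans on exactly this pairing: getAllSecretKeys() for the alias list, getSecretKey() for the material.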


@@ -138,6 +138,7 @@ public class Groups {
     List<String> groupList = impl.getGroups(user);
     long endMs = Time.monotonicNow();
     long deltaMs = endMs - startMs ;
+    UserGroupInformation.metrics.addGetGroups(deltaMs);
     if (deltaMs > warningDeltaMs) {
       LOG.warn("Potential performance problem: getGroups(user=" + user +") " +
           "took " + deltaMs + " milliseconds.");


@@ -19,6 +19,7 @@ package org.apache.hadoop.security;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS;

 import java.io.File;
 import java.io.IOException;
@@ -58,6 +59,8 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metrics;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MutableQuantiles;
 import org.apache.hadoop.metrics2.lib.MutableRate;
 import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
 import org.apache.hadoop.security.authentication.util.KerberosUtil;
@@ -92,14 +95,27 @@ public class UserGroupInformation {
    */
   @Metrics(about="User and group related metrics", context="ugi")
   static class UgiMetrics {
+    final MetricsRegistry registry = new MetricsRegistry("UgiMetrics");
+
     @Metric("Rate of successful kerberos logins and latency (milliseconds)")
     MutableRate loginSuccess;
     @Metric("Rate of failed kerberos logins and latency (milliseconds)")
     MutableRate loginFailure;
+    @Metric("GetGroups") MutableRate getGroups;
+    MutableQuantiles[] getGroupsQuantiles;

     static UgiMetrics create() {
       return DefaultMetricsSystem.instance().register(new UgiMetrics());
     }
+
+    void addGetGroups(long latency) {
+      getGroups.add(latency);
+      if (getGroupsQuantiles != null) {
+        for (MutableQuantiles q : getGroupsQuantiles) {
+          q.add(latency);
+        }
+      }
+    }
   }

 /**
@@ -250,6 +266,20 @@ public class UserGroupInformation {
       groups = Groups.getUserToGroupsMappingService(conf);
     }
     UserGroupInformation.conf = conf;
+
+    if (metrics.getGroupsQuantiles == null) {
+      int[] intervals = conf.getInts(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS);
+      if (intervals != null && intervals.length > 0) {
+        final int length = intervals.length;
+        MutableQuantiles[] getGroupsQuantiles = new MutableQuantiles[length];
+        for (int i = 0; i < length; i++) {
+          getGroupsQuantiles[i] = metrics.registry.newQuantiles(
+            "getGroups" + intervals[i] + "s",
+            "Get groups", "ops", "latency", intervals[i]);
+        }
+        metrics.getGroupsQuantiles = getGroupsQuantiles;
+      }
+    }
   }

 /**
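A minimal sketch of switching the new percentile metrics on (the interval values are illustrative; as the setConf() change above shows, the quantiles stay off when the key is unset):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class EnableGetGroupsQuantiles {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Comma-separated rollover intervals in seconds; setConf() builds one
    // MutableQuantiles per interval via conf.getInts().
    conf.set("hadoop.user.group.metrics.percentiles.intervals", "60,300");
    UserGroupInformation.setConfiguration(conf);
  }
}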


@@ -21,6 +21,7 @@ import java.io.File;
 import java.io.IOException;
 import java.util.List;

 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -160,6 +161,16 @@ public class TestKeyProviderFactory {
         provider.getCurrentKey("key4").getMaterial());
     assertArrayEquals(key3, provider.getCurrentKey("key3").getMaterial());
     assertEquals("key3@0", provider.getCurrentKey("key3").getVersionName());
+
+    List<String> keys = provider.getKeys();
+    assertTrue("Keys should have been returned.", keys.size() == 2);
+    assertTrue("Returned Keys should have included key3.", keys.contains("key3"));
+    assertTrue("Returned Keys should have included key4.", keys.contains("key4"));
+
+    List<KeyVersion> kvl = provider.getKeyVersions("key3");
+    assertTrue("KeyVersions should have been returned for key3.", kvl.size() == 1);
+    assertTrue("KeyVersions should have included key3@0.", kvl.get(0).getVersionName().equals("key3@0"));
+    assertArrayEquals(key3, kvl.get(0).getMaterial());
   }

   @Test
@Test @Test


@@ -19,7 +19,6 @@ package org.apache.hadoop.security;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ipc.TestSaslRPC;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.authentication.util.KerberosName;
@@ -40,9 +39,9 @@ import java.util.Collection;
 import java.util.LinkedHashSet;
 import java.util.Set;

+import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL;
 import static org.apache.hadoop.ipc.TestSaslRPC.*;
-import static org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenIdentifier;
 import static org.apache.hadoop.test.MetricsAsserts.*;
 import static org.junit.Assert.*;
 import static org.mockito.Mockito.mock;
@@ -55,6 +54,8 @@ public class TestUserGroupInformation {
   final private static String GROUP3_NAME = "group3";
   final private static String[] GROUP_NAMES =
       new String[]{GROUP1_NAME, GROUP2_NAME, GROUP3_NAME};
+  // Rollover interval of percentile metrics (in seconds)
+  private static final int PERCENTILES_INTERVAL = 1;
   private static Configuration conf;

 /**
@@ -80,7 +81,8 @@ public class TestUserGroupInformation {
     // doesn't matter what it is, but getGroups needs it set...
     // use HADOOP_HOME environment variable to prevent interfering with logic
     // that finds winutils.exe
-    System.setProperty("hadoop.home.dir", System.getenv("HADOOP_HOME"));
+    String home = System.getenv("HADOOP_HOME");
+    System.setProperty("hadoop.home.dir", (home != null ? home : "."));
     // fake the realm is kerberos is enabled
     System.setProperty("java.security.krb5.kdc", "");
     System.setProperty("java.security.krb5.realm", "DEFAULT.REALM");
@@ -150,11 +152,15 @@
   /** Test login method */
   @Test (timeout = 30000)
   public void testLogin() throws Exception {
+    conf.set(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS,
+      String.valueOf(PERCENTILES_INTERVAL));
+    UserGroupInformation.setConfiguration(conf);
     // login from unix
     UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
     assertEquals(UserGroupInformation.getCurrentUser(),
                  UserGroupInformation.getLoginUser());
     assertTrue(ugi.getGroupNames().length >= 1);
+    verifyGroupMetrics(1);

     // ensure that doAs works correctly
     UserGroupInformation userGroupInfo =
@@ -728,6 +734,21 @@ public class TestUserGroupInformation {
     }
   }

+  private static void verifyGroupMetrics(
+      long groups) throws InterruptedException {
+    MetricsRecordBuilder rb = getMetrics("UgiMetrics");
+    if (groups > 0) {
+      assertCounter("GetGroupsNumOps", groups, rb);
+      double avg = getDoubleGauge("GetGroupsAvgTime", rb);
+      assertTrue(avg >= 0.0);
+
+      // Sleep for an interval+slop to let the percentiles rollover
+      Thread.sleep((PERCENTILES_INTERVAL+1)*1000);
+      // Check that the percentiles were updated
+      assertQuantileGauges("GetGroups1s", rb);
+    }
+  }
+
   /**
    * Test for the case that UserGroupInformation.getCurrentUser()
    * is called when the AccessControlContext has a Subject associated


@@ -23,6 +23,7 @@ import org.apache.hadoop.oncrpc.RpcProgram;
 import org.apache.hadoop.oncrpc.SimpleTcpServer;
 import org.apache.hadoop.oncrpc.SimpleUdpServer;
 import org.apache.hadoop.portmap.PortmapMapping;
+import org.apache.hadoop.util.ShutdownHookManager;

 /**
  * Main class for starting mountd daemon. This daemon implements the NFS
@@ -71,8 +72,24 @@ abstract public class MountdBase {
     startUDPServer();
     startTCPServer();
     if (register) {
+      ShutdownHookManager.get().addShutdownHook(new Unregister(),
+          SHUTDOWN_HOOK_PRIORITY);
       rpcProgram.register(PortmapMapping.TRANSPORT_UDP, udpBoundPort);
       rpcProgram.register(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
     }
   }

+  /**
+   * Priority of the mountd shutdown hook.
+   */
+  public static final int SHUTDOWN_HOOK_PRIORITY = 10;
+
+  private class Unregister implements Runnable {
+    @Override
+    public synchronized void run() {
+      rpcProgram.unregister(PortmapMapping.TRANSPORT_UDP, udpBoundPort);
+      rpcProgram.unregister(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
+    }
+  }
 }
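The mountd hook above (and the matching nfsd hook in the next file) leans on Hadoop's ShutdownHookManager; a standalone sketch of the pattern (the Runnable body here is illustrative):

import org.apache.hadoop.util.ShutdownHookManager;

public class HookSketch {
  public static void main(String[] args) {
    // Hooks run at JVM shutdown; higher priority values run earlier.
    ShutdownHookManager.get().addShutdownHook(new Runnable() {
      @Override
      public void run() {
        System.out.println("unregistering with the local portmapper...");
      }
    }, 10); // same value as the SHUTDOWN_HOOK_PRIORITY constants above
  }
}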


@@ -23,6 +23,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.oncrpc.RpcProgram;
 import org.apache.hadoop.oncrpc.SimpleTcpServer;
 import org.apache.hadoop.portmap.PortmapMapping;
+import org.apache.hadoop.util.ShutdownHookManager;

 /**
  * Nfs server. Supports NFS v3 using {@link RpcProgram}.
@@ -50,6 +51,8 @@ public abstract class Nfs3Base {
     startTCPServer(); // Start TCP server

     if (register) {
+      ShutdownHookManager.get().addShutdownHook(new Unregister(),
+          SHUTDOWN_HOOK_PRIORITY);
       rpcProgram.register(PortmapMapping.TRANSPORT_TCP, nfsBoundPort);
     }
   }
@@ -61,4 +64,16 @@ public abstract class Nfs3Base {
     tcpServer.run();
     nfsBoundPort = tcpServer.getBoundPort();
   }
+
+  /**
+   * Priority of the nfsd shutdown hook.
+   */
+  public static final int SHUTDOWN_HOOK_PRIORITY = 10;
+
+  private class Unregister implements Runnable {
+    @Override
+    public synchronized void run() {
+      rpcProgram.unregister(PortmapMapping.TRANSPORT_TCP, nfsBoundPort);
+    }
+  }
 }


@@ -78,23 +78,41 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
     for (int vers = lowProgVersion; vers <= highProgVersion; vers++) {
       PortmapMapping mapEntry = new PortmapMapping(progNumber, vers, transport,
           port);
-      register(mapEntry);
+      register(mapEntry, true);
     }
   }

+  /**
+   * Unregister this program with the local portmapper.
+   */
+  public void unregister(int transport, int boundPort) {
+    if (boundPort != port) {
+      LOG.info("The bound port is " + boundPort
+          + ", different with configured port " + port);
+      port = boundPort;
+    }
+    // Unregister all the program versions with portmapper for a given transport
+    for (int vers = lowProgVersion; vers <= highProgVersion; vers++) {
+      PortmapMapping mapEntry = new PortmapMapping(progNumber, vers, transport,
+          port);
+      register(mapEntry, false);
+    }
+  }
+
   /**
    * Register the program with Portmap or Rpcbind
    */
-  protected void register(PortmapMapping mapEntry) {
-    XDR mappingRequest = PortmapRequest.create(mapEntry);
+  protected void register(PortmapMapping mapEntry, boolean set) {
+    XDR mappingRequest = PortmapRequest.create(mapEntry, set);
     SimpleUdpClient registrationClient = new SimpleUdpClient(host, RPCB_PORT,
         mappingRequest);
     try {
       registrationClient.run();
     } catch (IOException e) {
-      LOG.error("Registration failure with " + host + ":" + port
+      String request = set ? "Registration" : "Unregistration";
+      LOG.error(request + " failure with " + host + ":" + port
           + ", portmap entry: " + mapEntry);
-      throw new RuntimeException("Registration failure");
+      throw new RuntimeException(request + " failure");
     }
   }


@@ -31,13 +31,14 @@ public class PortmapRequest {
     return PortmapMapping.deserialize(xdr);
   }

-  public static XDR create(PortmapMapping mapping) {
+  public static XDR create(PortmapMapping mapping, boolean set) {
     XDR request = new XDR();
+    int procedure = set ? RpcProgramPortmap.PMAPPROC_SET
+        : RpcProgramPortmap.PMAPPROC_UNSET;
     RpcCall call = RpcCall.getInstance(
         RpcUtil.getNewXid(String.valueOf(RpcProgramPortmap.PROGRAM)),
-        RpcProgramPortmap.PROGRAM, RpcProgramPortmap.VERSION,
-        RpcProgramPortmap.PMAPPROC_SET, new CredentialsNone(),
-        new VerifierNone());
+        RpcProgramPortmap.PROGRAM, RpcProgramPortmap.VERSION, procedure,
+        new CredentialsNone(), new VerifierNone());
     call.write(request);
     return mapping.serialize(request);
   }


@@ -19,6 +19,10 @@ package org.apache.hadoop.hdfs.nfs.nfs3;

 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
@@ -29,7 +33,9 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSInputStream;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.ShutdownHookManager;

 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Objects;
@@ -94,7 +100,7 @@ class DFSClientCache {
   DFSClientCache(Configuration config) {
     this(config, DEFAULT_DFS_CLIENT_CACHE_SIZE);
   }

   DFSClientCache(Configuration config, int clientCache) {
     this.config = config;
     this.clientCache = CacheBuilder.newBuilder()
@@ -107,8 +113,52 @@ class DFSClientCache {
         .expireAfterAccess(DEFAULT_DFS_INPUTSTREAM_CACHE_TTL, TimeUnit.SECONDS)
         .removalListener(inputStreamRemovalListener())
         .build(inputStreamLoader());
+
+    ShutdownHookManager.get().addShutdownHook(new CacheFinalizer(),
+        SHUTDOWN_HOOK_PRIORITY);
   }

+  /**
+   * Priority of the FileSystem shutdown hook.
+   */
+  public static final int SHUTDOWN_HOOK_PRIORITY = 10;
+
+  private class CacheFinalizer implements Runnable {
+    @Override
+    public synchronized void run() {
+      try {
+        closeAll(true);
+      } catch (IOException e) {
+        LOG.info("DFSClientCache.closeAll() threw an exception:\n", e);
+      }
+    }
+  }
+
+  /**
+   * Close all DFSClient instances in the Cache.
+   * @param onlyAutomatic only close those that are marked for automatic closing
+   */
+  synchronized void closeAll(boolean onlyAutomatic) throws IOException {
+    List<IOException> exceptions = new ArrayList<IOException>();
+
+    ConcurrentMap<String, DFSClient> map = clientCache.asMap();
+
+    for (Entry<String, DFSClient> item : map.entrySet()) {
+      final DFSClient client = item.getValue();
+      if (client != null) {
+        try {
+          client.close();
+        } catch (IOException ioe) {
+          exceptions.add(ioe);
+        }
+      }
+    }
+
+    if (!exceptions.isEmpty()) {
+      throw MultipleIOException.createIOException(exceptions);
+    }
+  }
+
   private CacheLoader<String, DFSClient> clientLoader() {
     return new CacheLoader<String, DFSClient>() {
       @Override


@@ -13,10 +13,6 @@ Trunk (Unreleased)

    HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)

-   HDFS-2832. Heterogeneous Storages support in HDFS phase 1 - treat DataNode
-   as a collection of storages (see breakdown of tasks below for features and
-   contributors).
-
  IMPROVEMENTS

    HDFS-4665. Move TestNetworkTopologyWithNodeGroup to common.
@@ -246,8 +242,8 @@ Trunk (Unreleased)
    HDFS-5651. Remove dfs.namenode.caching.enabled and improve CRM locking.
    (cmccabe via wang)

-   HDFS-5496. Make replication queue initialization asynchronous. (Vinay via
-   jing9)
+   HDFS-5715. Use Snapshot ID to indicate the corresponding Snapshot for a
+   FileDiff/DirectoryDiff. (jing9)

  OPTIMIZATIONS
@@ -452,12 +448,6 @@ Trunk (Unreleased)

    HDFS-5626. dfsadmin -report shows incorrect cache values. (cmccabe)

-   HDFS-5406. Send incremental block reports for all storages in a
-   single call. (Arpit Agarwal)
-
-   HDFS-5454. DataNode UUID should be assigned prior to FsDataset
-   initialization. (Arpit Agarwal)
-
    HDFS-5679. TestCacheDirectives should handle the case where native code
    is not available. (wang)
@@ -471,130 +461,19 @@ Trunk (Unreleased)
    HDFS-5659. dfsadmin -report doesn't output cache information properly.
    (wang)

+   HDFS-5705. TestSecondaryNameNodeUpgrade#testChangeNsIDFails may fail due
+   to ConcurrentModificationException. (Ted Yu via brandonli)
+
+   HDFS-5719. FSImage#doRollback() should close prevState before return
+   (Ted Yu via brandonli)
+
+   HDFS-5589. Namenode loops caching and uncaching when data should be
+   uncached (awang via cmccabe)
+
+   HDFS-5724. modifyCacheDirective logging audit log command wrongly as
+   addCacheDirective (Uma Maheswara Rao G via Colin Patrick McCabe)
+
+   HDFS-5726. Fix compilation error in AbstractINodeDiff for JDK7. (jing9)
+
-  BREAKDOWN OF HDFS-2832 SUBTASKS AND RELATED JIRAS
-
-   HDFS-4985. Add storage type to the protocol and expose it in block report
-   and block locations. (Arpit Agarwal)
-
-   HDFS-5115. Make StorageID a UUID. (Arpit Agarwal)
-
-   HDFS-5000. DataNode configuration should allow specifying storage type.
-   (Arpit Agarwal)
-
-   HDFS-4987. Namenode changes to track multiple storages per datanode.
-   (szetszwo)
-
-   HDFS-5154. Fix TestBlockManager and TestDatanodeDescriptor after HDFS-4987.
-   (Junping Du via szetszwo)
-
-   HDFS-5009. Include storage information in the LocatedBlock. (szetszwo)
-
-   HDFS-5134. Move blockContentsStale, heartbeatedSinceFailover and
-   firstBlockReport from DatanodeDescriptor to DatanodeStorageInfo; and
-   fix a synchronization problem in DatanodeStorageInfo. (szetszwo)
-
-   HDFS-5157. Add StorageType to FsVolume. (Junping Du via szetszwo)
-
-   HDFS-4990. Change BlockPlacementPolicy to choose storages instead of
-   datanodes. (szetszwo)
-
-   HDFS-5232. Protocol changes to transmit StorageUuid. (Arpit Agarwal)
-
-   HDFS-5233. Use Datanode UUID to identify Datanodes. (Arpit Agarwal)
-
-   HDFS-5222. Move block schedule information from DatanodeDescriptor to
-   DatanodeStorageInfo. (szetszwo)
-
-   HDFS-4988. Datanode must support all the volumes as individual storages.
-   (Arpit Agarwal)
-
-   HDFS-5377. Heartbeats from Datandode should include one storage report
-   per storage directory. (Arpit Agarwal)
-
-   HDFS-5398. NameNode changes to process storage reports per storage
-   directory. (Arpit Agarwal)
-
-   HDFS-5390. Send one incremental block report per storage directory.
-   (Arpit Agarwal)
-
-   HDFS-5401. Fix NPE in Directory Scanner. (Arpit Agarwal)
-
-   HDFS-5417. Fix storage IDs in PBHelper and UpgradeUtilities. (szetszwo)
-
-   HDFS-5214. Fix NPEs in BlockManager and DirectoryScanner. (Arpit Agarwal)
-
-   HDFS-5435. File append fails to initialize storageIDs. (Junping Du via
-   Arpit Agarwal)
-
-   HDFS-5437. Fix TestBlockReport and TestBPOfferService failures. (Arpit
-   Agarwal)
-
-   HDFS-5447. Fix TestJspHelper. (Arpit Agarwal)
-
-   HDFS-5452. Fix TestReplicationPolicy and TestBlocksScheduledCounter.
-
-   HDFS-5448. Datanode should generate its ID on first registration. (Arpit
-   Agarwal)
-
-   HDFS-5448. Fix break caused by previous checkin for HDFS-5448. (Arpit
-   Agarwal)
-
-   HDFS-5455. NN should update storageMap on first heartbeat. (Arpit Agarwal)
-
-   HDFS-5457. Fix TestDatanodeRegistration, TestFsck and TestAddBlockRetry.
-   (Contributed by szetszwo)
-
-   HDFS-5466. Update storage IDs when the pipeline is updated. (Contributed
-   by szetszwo)
-
-   HDFS-5439. Fix TestPendingReplication. (Contributed by Junping Du, Arpit
-   Agarwal)
-
-   HDFS-5470. Add back trunk's reportDiff algorithm to the branch.
-   (Contributed by szetszwo)
-
-   HDFS-5472. Fix TestDatanodeManager, TestSafeMode and
-   TestNNThroughputBenchmark (Contributed by szetszwo)
-
-   HDFS-5475. NN incorrectly tracks more than one replica per DN. (Arpit
-   Agarwal)
-
-   HDFS-5481. Fix TestDataNodeVolumeFailure in branch HDFS-2832. (Contributed
-   by Junping Du)
-
-   HDFS-5480. Update Balancer for HDFS-2832. (Contributed by szetszwo)
-
-   HDFS-5486. Fix TestNameNodeMetrics for HDFS-2832. (Arpit Agarwal)
-
-   HDFS-5491. Update editsStored for HDFS-2832. (Arpit Agarwal)
-
-   HDFS-5494. Fix findbugs warnings for HDFS-2832. (Arpit Agarwal)
-
-   HDFS-5508. Fix compilation error after merge. (Contributed by szetszwo)
-
-   HDFS-5501. Fix pendingReceivedRequests tracking in BPServiceActor. (Arpit
-   Agarwal)
-
-   HDFS-5510. Fix a findbug warning in DataStorage.java on HDFS-2832 branch.
-   (Junping Du via Arpit Agarwal)
-
-   HDFS-5515. Fix TestDFSStartupVersions for HDFS-2832. (Arpit Agarwal)
-
-   HDFS-5527. Fix TestUnderReplicatedBlocks on branch HDFS-2832. (Arpit
-   Agarwal)
-
-   HDFS-5547. Fix build break after merge from trunk to HDFS-2832. (Arpit
-   Agarwal)
-
-   HDFS-5542. Fix TODO and clean up the code in HDFS-2832. (Contributed by
-   szetszwo)
-
-   HDFS-5559. Fix TestDatanodeConfig in HDFS-2832. (Contributed by szetszwo)
-
-   HDFS-5484. StorageType and State in DatanodeStorageInfo in NameNode is
-   not accurate. (Eric Sirianni via Arpit Agarwal)
-
-   HDFS-5648. Get rid of FsDatasetImpl#perVolumeReplicaMap. (Arpit Agarwal)
-
 Release 2.4.0 - UNRELEASED
@@ -626,6 +505,10 @@ Release 2.4.0 - UNRELEASED

    HDFS-5514. FSNamesystem's fsLock should allow custom implementation (daryn)

+   HDFS-2832. Heterogeneous Storages support in HDFS phase 1 - treat DataNode
+   as a collection of storages (see breakdown of tasks below for features and
+   contributors).
+
  IMPROVEMENTS

    HDFS-5267. Remove volatile from LightWeightHashSet. (Junping Du via llu)
@@ -783,6 +666,11 @@ Release 2.4.0 - UNRELEASED
    HDFS-2933. Improve DataNode Web UI Index Page. (Vivek Ganesan via
    Arpit Agarwal)

+   HDFS-5695. Clean up TestOfflineEditsViewer and OfflineEditsViewerHelper.
+   (Haohui Mai via jing9)
+
+   HDFS-5220. Expose group resolution time as metric (jxiang via cmccabe)
+
  OPTIMIZATIONS

    HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)
@@ -848,6 +736,139 @@ Release 2.4.0 - UNRELEASED
    HDFS-5690. DataNode fails to start in secure mode when dfs.http.policy equals to
    HTTP_ONLY. (Haohui Mai via jing9)

+  BREAKDOWN OF HDFS-2832 SUBTASKS AND RELATED JIRAS
+
+   HDFS-4985. Add storage type to the protocol and expose it in block report
+   and block locations. (Arpit Agarwal)
+
+   HDFS-5115. Make StorageID a UUID. (Arpit Agarwal)
+
+   HDFS-5000. DataNode configuration should allow specifying storage type.
+   (Arpit Agarwal)
+
+   HDFS-4987. Namenode changes to track multiple storages per datanode.
+   (szetszwo)
+
+   HDFS-5154. Fix TestBlockManager and TestDatanodeDescriptor after HDFS-4987.
+   (Junping Du via szetszwo)
+
+   HDFS-5009. Include storage information in the LocatedBlock. (szetszwo)
+
+   HDFS-5134. Move blockContentsStale, heartbeatedSinceFailover and
+   firstBlockReport from DatanodeDescriptor to DatanodeStorageInfo; and
+   fix a synchronization problem in DatanodeStorageInfo. (szetszwo)
+
+   HDFS-5157. Add StorageType to FsVolume. (Junping Du via szetszwo)
+
+   HDFS-4990. Change BlockPlacementPolicy to choose storages instead of
+   datanodes. (szetszwo)
+
+   HDFS-5232. Protocol changes to transmit StorageUuid. (Arpit Agarwal)
+
+   HDFS-5233. Use Datanode UUID to identify Datanodes. (Arpit Agarwal)
+
+   HDFS-5222. Move block schedule information from DatanodeDescriptor to
+   DatanodeStorageInfo. (szetszwo)
+
+   HDFS-4988. Datanode must support all the volumes as individual storages.
+   (Arpit Agarwal)
+
+   HDFS-5377. Heartbeats from Datandode should include one storage report
+   per storage directory. (Arpit Agarwal)
+
+   HDFS-5398. NameNode changes to process storage reports per storage
+   directory. (Arpit Agarwal)
+
+   HDFS-5390. Send one incremental block report per storage directory.
+   (Arpit Agarwal)
+
+   HDFS-5401. Fix NPE in Directory Scanner. (Arpit Agarwal)
+
+   HDFS-5417. Fix storage IDs in PBHelper and UpgradeUtilities. (szetszwo)
+
+   HDFS-5214. Fix NPEs in BlockManager and DirectoryScanner. (Arpit Agarwal)
+
+   HDFS-5435. File append fails to initialize storageIDs. (Junping Du via
+   Arpit Agarwal)
+
+   HDFS-5437. Fix TestBlockReport and TestBPOfferService failures. (Arpit
+   Agarwal)
+
+   HDFS-5447. Fix TestJspHelper. (Arpit Agarwal)
+
+   HDFS-5452. Fix TestReplicationPolicy and TestBlocksScheduledCounter.
+
+   HDFS-5448. Datanode should generate its ID on first registration. (Arpit
+   Agarwal)
+
+   HDFS-5448. Fix break caused by previous checkin for HDFS-5448. (Arpit
+   Agarwal)
+
+   HDFS-5455. NN should update storageMap on first heartbeat. (Arpit Agarwal)
+
+   HDFS-5457. Fix TestDatanodeRegistration, TestFsck and TestAddBlockRetry.
+   (Contributed by szetszwo)
+
+   HDFS-5466. Update storage IDs when the pipeline is updated. (Contributed
+   by szetszwo)
+
+   HDFS-5439. Fix TestPendingReplication. (Contributed by Junping Du, Arpit
+   Agarwal)
+
+   HDFS-5470. Add back trunk's reportDiff algorithm to the branch.
+   (Contributed by szetszwo)
+
+   HDFS-5472. Fix TestDatanodeManager, TestSafeMode and
+   TestNNThroughputBenchmark (Contributed by szetszwo)
+
+   HDFS-5475. NN incorrectly tracks more than one replica per DN. (Arpit
+   Agarwal)
+
+   HDFS-5481. Fix TestDataNodeVolumeFailure in branch HDFS-2832. (Contributed
+   by Junping Du)
+
+   HDFS-5480. Update Balancer for HDFS-2832. (Contributed by szetszwo)
+
+   HDFS-5486. Fix TestNameNodeMetrics for HDFS-2832. (Arpit Agarwal)
+
+   HDFS-5491. Update editsStored for HDFS-2832. (Arpit Agarwal)
+
+   HDFS-5494. Fix findbugs warnings for HDFS-2832. (Arpit Agarwal)
+
+   HDFS-5508. Fix compilation error after merge. (Contributed by szetszwo)
+
+   HDFS-5501. Fix pendingReceivedRequests tracking in BPServiceActor. (Arpit
+   Agarwal)
+
+   HDFS-5510. Fix a findbug warning in DataStorage.java on HDFS-2832 branch.
+   (Junping Du via Arpit Agarwal)
+
+   HDFS-5515. Fix TestDFSStartupVersions for HDFS-2832. (Arpit Agarwal)
+
+   HDFS-5527. Fix TestUnderReplicatedBlocks on branch HDFS-2832. (Arpit
+   Agarwal)
+
+   HDFS-5547. Fix build break after merge from trunk to HDFS-2832. (Arpit
+   Agarwal)
+
+   HDFS-5542. Fix TODO and clean up the code in HDFS-2832. (Contributed by
+   szetszwo)
+
+   HDFS-5559. Fix TestDatanodeConfig in HDFS-2832. (Contributed by szetszwo)
+
+   HDFS-5484. StorageType and State in DatanodeStorageInfo in NameNode is
+   not accurate. (Eric Sirianni via Arpit Agarwal)
+
+   HDFS-5648. Get rid of FsDatasetImpl#perVolumeReplicaMap. (Arpit Agarwal)
+
+   HDFS-5406. Send incremental block reports for all storages in a
+   single call. (Arpit Agarwal)
+
+   HDFS-5454. DataNode UUID should be assigned prior to FsDataset
+   initialization. (Arpit Agarwal)
+
+   HDFS-5667. Include DatanodeStorage in StorageReport. (Arpit Agarwal)
+
 Release 2.3.0 - UNRELEASED

  INCOMPATIBLE CHANGES
@@ -1042,6 +1063,9 @@ Release 2.3.0 - UNRELEASED

    HDFS-5671. Fix socket leak in DFSInputStream#getBlockReader. (JamesLi via umamahesh)

+   HDFS-5649. Unregister NFS and Mount service when NFS gateway is shutting down.
+   (brandonli)
+
Release 2.2.0 - 2013-10-13

  INCOMPATIBLE CHANGES


@@ -1559,13 +1559,17 @@ public class PBHelper {
     StorageReportProto.Builder builder = StorageReportProto.newBuilder()
         .setBlockPoolUsed(r.getBlockPoolUsed()).setCapacity(r.getCapacity())
         .setDfsUsed(r.getDfsUsed()).setRemaining(r.getRemaining())
-        .setStorageUuid(r.getStorageID());
+        .setStorageUuid(r.getStorage().getStorageID())
+        .setStorage(convert(r.getStorage()));
     return builder.build();
   }

   public static StorageReport convert(StorageReportProto p) {
-    return new StorageReport(p.getStorageUuid(), p.getFailed(),
-        p.getCapacity(), p.getDfsUsed(), p.getRemaining(),
+    return new StorageReport(
+        p.hasStorage() ?
+            convert(p.getStorage()) :
+            new DatanodeStorage(p.getStorageUuid()),
+        p.getFailed(), p.getCapacity(), p.getDfsUsed(), p.getRemaining(),
         p.getBlockPoolUsed());
   }


@@ -21,12 +21,14 @@ import static org.apache.hadoop.util.ExitUtil.terminate;

 import java.io.Closeable;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Date;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Random;
+import java.util.TreeMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.ReentrantLock;
@@ -47,6 +49,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.util.GSet;
 import org.apache.hadoop.util.Time;
@@ -76,7 +79,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
   /**
    * Pseudorandom number source
    */
-  private final Random random = new Random();
+  private static final Random random = new Random();

   /**
    * The interval at which we scan the namesystem for caching changes.
@@ -310,8 +313,6 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
     FSDirectory fsDir = namesystem.getFSDirectory();
     final long now = new Date().getTime();
     for (CacheDirective directive : cacheManager.getCacheDirectives()) {
-      // Reset the directive's statistics
-      directive.resetStatistics();
       // Skip processing this entry if it has expired
       if (LOG.isTraceEnabled()) {
         LOG.trace("Directive expiry is at " + directive.getExpiryTime());
@@ -339,7 +340,8 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
         }
       } else if (node.isDirectory()) {
         INodeDirectory dir = node.asDirectory();
-        ReadOnlyList<INode> children = dir.getChildrenList(null);
+        ReadOnlyList<INode> children = dir
+            .getChildrenList(Snapshot.CURRENT_STATE_ID);
         for (INode child : children) {
           if (child.isFile()) {
             rescanFile(directive, child.asFile());
@@ -461,7 +463,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
       // there may be a period of time when incomplete blocks remain cached
       // on the DataNodes.
       return "not complete";
     } else if (cblock.getReplication() == 0) {
       // Since 0 is not a valid value for a cache directive's replication
       // field, seeing a replication of 0 on a CacheBlock means that it
       // has never been reached by any sweep.
@@ -469,6 +471,9 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
     } else if (cblock.getMark() != mark) {
       // Although the block was needed in the past, we didn't reach it during
       // the current sweep. Therefore, it doesn't need to be cached any more.
+      // Need to set the replication to 0 so it doesn't flip back to cached
+      // when the mark flips on the next scan
+      cblock.setReplicationAndMark((short)0, mark);
       return "no longer needed by any directives";
     }
     return null;
@@ -595,7 +600,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
    * @param pendingCached A list of DataNodes that will soon cache the
    *                      block.
    */
-  private void addNewPendingCached(int neededCached,
+  private void addNewPendingCached(final int neededCached,
       CachedBlock cachedBlock, List<DatanodeDescriptor> cached,
       List<DatanodeDescriptor> pendingCached) {
     // To figure out which replicas can be cached, we consult the
@ -616,35 +621,156 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
} }
return; return;
} }
List<DatanodeDescriptor> possibilities = new LinkedList<DatanodeDescriptor>(); // Filter the list of replicas to only the valid targets
List<DatanodeDescriptor> possibilities =
new LinkedList<DatanodeDescriptor>();
int numReplicas = blockInfo.getCapacity(); int numReplicas = blockInfo.getCapacity();
Collection<DatanodeDescriptor> corrupt = Collection<DatanodeDescriptor> corrupt =
blockManager.getCorruptReplicas(blockInfo); blockManager.getCorruptReplicas(blockInfo);
int outOfCapacity = 0;
for (int i = 0; i < numReplicas; i++) { for (int i = 0; i < numReplicas; i++) {
DatanodeDescriptor datanode = blockInfo.getDatanode(i); DatanodeDescriptor datanode = blockInfo.getDatanode(i);
if ((datanode != null) && if (datanode == null) {
((!pendingCached.contains(datanode)) && continue;
((corrupt == null) || (!corrupt.contains(datanode))))) {
possibilities.add(datanode);
} }
if (datanode.isDecommissioned() || datanode.isDecommissionInProgress()) {
continue;
}
if (corrupt != null && corrupt.contains(datanode)) {
continue;
}
if (pendingCached.contains(datanode) || cached.contains(datanode)) {
continue;
}
long pendingCapacity = datanode.getCacheRemaining();
// Subtract pending cached blocks from effective capacity
Iterator<CachedBlock> it = datanode.getPendingCached().iterator();
while (it.hasNext()) {
CachedBlock cBlock = it.next();
BlockInfo info =
blockManager.getStoredBlock(new Block(cBlock.getBlockId()));
if (info != null) {
pendingCapacity -= info.getNumBytes();
}
}
it = datanode.getPendingUncached().iterator();
// Add pending uncached blocks from effective capacity
while (it.hasNext()) {
CachedBlock cBlock = it.next();
BlockInfo info =
blockManager.getStoredBlock(new Block(cBlock.getBlockId()));
if (info != null) {
pendingCapacity += info.getNumBytes();
}
}
if (pendingCapacity < blockInfo.getNumBytes()) {
if (LOG.isTraceEnabled()) {
LOG.trace("Datanode " + datanode + " is not a valid possibility for"
+ " block " + blockInfo.getBlockId() + " of size "
+ blockInfo.getNumBytes() + " bytes, only has "
+ datanode.getCacheRemaining() + " bytes of cache remaining.");
}
outOfCapacity++;
continue;
}
possibilities.add(datanode);
} }
while (neededCached > 0) { List<DatanodeDescriptor> chosen = chooseDatanodesForCaching(possibilities,
if (possibilities.isEmpty()) { neededCached, blockManager.getDatanodeManager().getStaleInterval());
LOG.warn("We need " + neededCached + " more replica(s) than " + for (DatanodeDescriptor datanode : chosen) {
"actually exist to provide a cache replication of " +
cachedBlock.getReplication() + " for " + cachedBlock);
return;
}
DatanodeDescriptor datanode =
possibilities.remove(random.nextInt(possibilities.size()));
if (LOG.isDebugEnabled()) {
LOG.debug("AddNewPendingCached: datanode " + datanode +
" will now cache block " + cachedBlock);
}
pendingCached.add(datanode); pendingCached.add(datanode);
boolean added = datanode.getPendingCached().add(cachedBlock); boolean added = datanode.getPendingCached().add(cachedBlock);
assert added; assert added;
neededCached--; }
// We were unable to satisfy the requested replication factor
if (neededCached > chosen.size()) {
if (LOG.isDebugEnabled()) {
LOG.debug(
"Only have " +
(cachedBlock.getReplication() - neededCached + chosen.size()) +
" of " + cachedBlock.getReplication() + " cached replicas for " +
cachedBlock + " (" + outOfCapacity + " nodes have insufficient " +
"capacity).");
}
} }
} }
/**
* Chooses datanode locations for caching from a list of valid possibilities.
* Non-stale nodes are chosen before stale nodes.
*
* @param possibilities List of candidate datanodes
* @param neededCached Number of replicas needed
* @param staleInterval Age of a stale datanode
* @return A list of chosen datanodes
*/
private static List<DatanodeDescriptor> chooseDatanodesForCaching(
final List<DatanodeDescriptor> possibilities, final int neededCached,
final long staleInterval) {
// Make a copy that we can modify
List<DatanodeDescriptor> targets =
new ArrayList<DatanodeDescriptor>(possibilities);
// Selected targets
List<DatanodeDescriptor> chosen = new LinkedList<DatanodeDescriptor>();
// Filter out stale datanodes
List<DatanodeDescriptor> stale = new LinkedList<DatanodeDescriptor>();
Iterator<DatanodeDescriptor> it = targets.iterator();
while (it.hasNext()) {
DatanodeDescriptor d = it.next();
if (d.isStale(staleInterval)) {
it.remove();
stale.add(d);
}
}
// Select targets
while (chosen.size() < neededCached) {
// Try to use stale nodes if we're out of non-stale nodes, else we're done
if (targets.isEmpty()) {
if (!stale.isEmpty()) {
targets = stale;
} else {
break;
}
}
// Select a random target
DatanodeDescriptor target =
chooseRandomDatanodeByRemainingCapacity(targets);
chosen.add(target);
targets.remove(target);
}
return chosen;
}
/**
* Choose a single datanode from the provided list of possible
* targets, weighted by the percentage of free space remaining on the node.
*
* @return The chosen datanode
*/
private static DatanodeDescriptor chooseRandomDatanodeByRemainingCapacity(
final List<DatanodeDescriptor> targets) {
// Use a weighted probability to choose the target datanode
float total = 0;
for (DatanodeDescriptor d : targets) {
total += d.getCacheRemainingPercent();
}
// Give each datanode a portion of keyspace equal to its relative weight
// [0, w1) selects d1, [w1, w2) selects d2, etc.
TreeMap<Integer, DatanodeDescriptor> lottery =
new TreeMap<Integer, DatanodeDescriptor>();
int offset = 0;
for (DatanodeDescriptor d : targets) {
// Since we're using floats, be paranoid about negative values
int weight =
Math.max(1, (int)((d.getCacheRemainingPercent() / total) * 1000000));
offset += weight;
lottery.put(offset, d);
}
// Choose a number from [0, offset), which is the total amount of weight,
// to select the winner
DatanodeDescriptor winner =
lottery.higherEntry(random.nextInt(offset)).getValue();
return winner;
}
}
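The TreeMap "lottery" above is a general pattern for weighted random selection: each candidate gets a slice of [0, total) proportional to its weight, and higherEntry() finds the slice a uniform draw lands in. A minimal standalone sketch of the same idea; the class, choose(), and the string candidates are illustrative stand-ins for DatanodeDescriptor and getCacheRemainingPercent(), not part of this patch:

import java.util.List;
import java.util.Random;
import java.util.TreeMap;

public class WeightedChoice {
  private static final Random random = new Random();

  static <T> T choose(List<T> items, List<Integer> weights) {
    // Cumulative offsets as keys: item i owns the key range
    // (offset[i-1], offset[i]].
    TreeMap<Integer, T> lottery = new TreeMap<Integer, T>();
    int offset = 0;
    for (int i = 0; i < items.size(); i++) {
      offset += Math.max(1, weights.get(i)); // guard against zero weights
      lottery.put(offset, items.get(i));
    }
    // higherEntry returns the first key strictly greater than the draw,
    // i.e. the item whose cumulative slice contains it.
    return lottery.higherEntry(random.nextInt(offset)).getValue();
  }

  public static void main(String[] args) {
    System.out.println(choose(
        java.util.Arrays.asList("dn1", "dn2", "dn3"),
        java.util.Arrays.asList(10, 30, 60))); // dn3 wins ~60% of draws
  }
}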
@@ -355,11 +355,11 @@ public class DatanodeDescriptor extends DatanodeInfo {
     setLastUpdate(Time.now());
     this.volumeFailures = volFailures;
     for (StorageReport report : reports) {
-      DatanodeStorageInfo storage = storageMap.get(report.getStorageID());
+      DatanodeStorageInfo storage = storageMap.get(report.getStorage().getStorageID());
       if (storage == null) {
         // This is seen during cluster initialization when the heartbeat
         // is received before the initial block reports from each storage.
-        storage = updateStorage(new DatanodeStorage(report.getStorageID()));
+        storage = updateStorage(report.getStorage());
       }
       storage.receivedHeartbeat(report);
       totalCapacity += report.getCapacity();
@@ -121,7 +121,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     reports = new StorageReport[volumes.volumes.size()];
     int i = 0;
     for (FsVolumeImpl volume : volumes.volumes) {
-      reports[i++] = new StorageReport(volume.getStorageID(),
+      reports[i++] = new StorageReport(volume.toDatanodeStorage(),
           false,
           volume.getCapacity(),
           volume.getDfsUsed(),
@@ -237,12 +237,9 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     final List<FsVolumeImpl> volArray = new ArrayList<FsVolumeImpl>(
         storage.getNumStorageDirs());
     for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
-      // TODO: getStorageTypeFromLocations() is only a temporary workaround and
-      // should be replaced with getting storage type from DataStorage (missing
-      // storage type now) directly.
       Storage.StorageDirectory sd = storage.getStorageDir(idx);
       final File dir = sd.getCurrentDir();
-      final StorageType storageType = getStorageTypeFromLocations(dataLocations, dir);
+      final StorageType storageType = getStorageTypeFromLocations(dataLocations, sd.getRoot());
       volArray.add(new FsVolumeImpl(this, sd.getStorageUuid(), dir, conf,
           storageType));
       LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
@@ -19,10 +19,10 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 import java.io.File;
 import java.io.IOException;
-import java.util.HashMap;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.Executor;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadFactory;
@@ -54,7 +54,7 @@ class FsVolumeImpl implements FsVolumeSpi {
   private final String storageID;
   private final StorageType storageType;
   private final Map<String, BlockPoolSlice> bpSlices
-      = new HashMap<String, BlockPoolSlice>();
+      = new ConcurrentHashMap<String, BlockPoolSlice>();
   private final File currentDir;    // <StorageDirectory>/current
   private final DF usage;
   private final long reserved;
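The HashMap-to-ConcurrentHashMap switch above matters because the slice map can be read by heartbeat and report paths while block pools are added or removed by other threads. A minimal sketch of why the concurrent map is the safer choice; the names are illustrative, not the real FsVolumeImpl API:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class SliceMapDemo {
  private final Map<String, Object> bpSlices =
      new ConcurrentHashMap<String, Object>();

  void addBlockPool(String bpid) {
    bpSlices.put(bpid, new Object()); // safe to call from any thread
  }

  long countSlices() {
    // Weakly consistent iteration: never throws
    // ConcurrentModificationException even if addBlockPool runs concurrently,
    // which a plain HashMap does not guarantee.
    long n = 0;
    for (Map.Entry<String, Object> e : bpSlices.entrySet()) {
      n++;
    }
    return n;
  }
}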
@@ -53,7 +53,6 @@ import org.apache.hadoop.fs.InvalidRequestException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirective;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
@@ -69,6 +68,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
@@ -399,7 +399,8 @@ public final class CacheManager {
       requestedBytes = file.computeFileSize();
     } else if (node.isDirectory()) {
       INodeDirectory dir = node.asDirectory();
-      ReadOnlyList<INode> children = dir.getChildrenList(null);
+      ReadOnlyList<INode> children = dir
+          .getChildrenList(Snapshot.CURRENT_STATE_ID);
       requestedFiles = children.size();
       for (INode child : children) {
         if (child.isFile()) {
@@ -614,14 +614,14 @@ public class FSDirectory implements Closeable {
     INode srcChild = srcIIP.getLastINode();
     final byte[] srcChildName = srcChild.getLocalNameBytes();
     final boolean isSrcInSnapshot = srcChild.isInLatestSnapshot(
-        srcIIP.getLatestSnapshot());
+        srcIIP.getLatestSnapshotId());
     final boolean srcChildIsReference = srcChild.isReference();

     // Record the snapshot on srcChild. After the rename, before any new
     // snapshot is taken on the dst tree, changes will be recorded in the latest
     // snapshot of the src tree.
     if (isSrcInSnapshot) {
-      srcChild = srcChild.recordModification(srcIIP.getLatestSnapshot());
+      srcChild = srcChild.recordModification(srcIIP.getLatestSnapshotId());
       srcIIP.setLastINode(srcChild);
     }
@@ -629,17 +629,16 @@ public class FSDirectory implements Closeable {
     final INodeReference.WithCount withCount;
     Quota.Counts oldSrcCounts = Quota.Counts.newInstance();
     int srcRefDstSnapshot = srcChildIsReference ? srcChild.asReference()
-        .getDstSnapshotId() : Snapshot.INVALID_ID;
+        .getDstSnapshotId() : Snapshot.CURRENT_STATE_ID;
     if (isSrcInSnapshot) {
       final INodeReference.WithName withName =
           srcIIP.getINode(-2).asDirectory().replaceChild4ReferenceWithName(
-              srcChild, srcIIP.getLatestSnapshot());
+              srcChild, srcIIP.getLatestSnapshotId());
       withCount = (INodeReference.WithCount) withName.getReferredINode();
       srcChild = withName;
       srcIIP.setLastINode(srcChild);
       // get the counts before rename
-      withCount.getReferredINode().computeQuotaUsage(oldSrcCounts, true,
-          Snapshot.INVALID_ID);
+      withCount.getReferredINode().computeQuotaUsage(oldSrcCounts, true);
     } else if (srcChildIsReference) {
       // srcChild is reference but srcChild is not in latest snapshot
       withCount = (WithCount) srcChild.asReference().getReferredINode();
@@ -675,10 +674,9 @@ public class FSDirectory implements Closeable {
       toDst = srcChild;
     } else {
       withCount.getReferredINode().setLocalName(dstChildName);
-      Snapshot dstSnapshot = dstIIP.getLatestSnapshot();
+      int dstSnapshotId = dstIIP.getLatestSnapshotId();
       final INodeReference.DstReference ref = new INodeReference.DstReference(
-          dstParent.asDirectory(), withCount,
-          dstSnapshot == null ? Snapshot.INVALID_ID : dstSnapshot.getId());
+          dstParent.asDirectory(), withCount, dstSnapshotId);
       toDst = ref;
     }
@@ -690,9 +688,9 @@ public class FSDirectory implements Closeable {
       }

       // update modification time of dst and the parent of src
       final INode srcParent = srcIIP.getINode(-2);
-      srcParent.updateModificationTime(timestamp, srcIIP.getLatestSnapshot());
+      srcParent.updateModificationTime(timestamp, srcIIP.getLatestSnapshotId());
       dstParent = dstIIP.getINode(-2); // refresh dstParent
-      dstParent.updateModificationTime(timestamp, dstIIP.getLatestSnapshot());
+      dstParent.updateModificationTime(timestamp, dstIIP.getLatestSnapshotId());

       // update moved leases with new filename
       getFSNamesystem().unprotectedChangeLease(src, dst);
@@ -700,7 +698,7 @@ public class FSDirectory implements Closeable {
       if (isSrcInSnapshot) {
         // get the counts after rename
         Quota.Counts newSrcCounts = srcChild.computeQuotaUsage(
-            Quota.Counts.newInstance(), false, Snapshot.INVALID_ID);
+            Quota.Counts.newInstance(), false);
         newSrcCounts.subtract(oldSrcCounts);
         srcParent.addSpaceConsumed(newSrcCounts.get(Quota.NAMESPACE),
             newSrcCounts.get(Quota.DISKSPACE), false);
@@ -732,8 +730,7 @@ public class FSDirectory implements Closeable {
       if (isSrcInSnapshot) {
         // srcParent must have snapshot feature since isSrcInSnapshot is true
         // and src node has been removed from srcParent
-        srcParent.undoRename4ScrParent(oldSrcChild.asReference(), srcChild,
-            srcIIP.getLatestSnapshot());
+        srcParent.undoRename4ScrParent(oldSrcChild.asReference(), srcChild);
       } else {
         // original srcChild is not in latest snapshot, we only need to add
         // the srcChild back
@@ -836,7 +833,7 @@ public class FSDirectory implements Closeable {
     }
     if (dstInode.isDirectory()) {
       final ReadOnlyList<INode> children = dstInode.asDirectory()
-          .getChildrenList(null);
+          .getChildrenList(Snapshot.CURRENT_STATE_ID);
       if (!children.isEmpty()) {
         error = "rename destination directory is not empty: " + dst;
         NameNode.stateChangeLog.warn(
@@ -867,31 +864,30 @@ public class FSDirectory implements Closeable {
     INode srcChild = srcIIP.getLastINode();
     final byte[] srcChildName = srcChild.getLocalNameBytes();
     final boolean isSrcInSnapshot = srcChild.isInLatestSnapshot(
-        srcIIP.getLatestSnapshot());
+        srcIIP.getLatestSnapshotId());
     final boolean srcChildIsReference = srcChild.isReference();

     // Record the snapshot on srcChild. After the rename, before any new
     // snapshot is taken on the dst tree, changes will be recorded in the latest
     // snapshot of the src tree.
     if (isSrcInSnapshot) {
-      srcChild = srcChild.recordModification(srcIIP.getLatestSnapshot());
+      srcChild = srcChild.recordModification(srcIIP.getLatestSnapshotId());
       srcIIP.setLastINode(srcChild);
     }

     // check srcChild for reference
     final INodeReference.WithCount withCount;
     int srcRefDstSnapshot = srcChildIsReference ? srcChild.asReference()
-        .getDstSnapshotId() : Snapshot.INVALID_ID;
+        .getDstSnapshotId() : Snapshot.CURRENT_STATE_ID;
     Quota.Counts oldSrcCounts = Quota.Counts.newInstance();
     if (isSrcInSnapshot) {
       final INodeReference.WithName withName = srcIIP.getINode(-2).asDirectory()
-          .replaceChild4ReferenceWithName(srcChild, srcIIP.getLatestSnapshot());
+          .replaceChild4ReferenceWithName(srcChild, srcIIP.getLatestSnapshotId());
       withCount = (INodeReference.WithCount) withName.getReferredINode();
       srcChild = withName;
       srcIIP.setLastINode(srcChild);
       // get the counts before rename
-      withCount.getReferredINode().computeQuotaUsage(oldSrcCounts, true,
-          Snapshot.INVALID_ID);
+      withCount.getReferredINode().computeQuotaUsage(oldSrcCounts, true);
     } else if (srcChildIsReference) {
       // srcChild is reference but srcChild is not in latest snapshot
       withCount = (WithCount) srcChild.asReference().getReferredINode();
@@ -935,10 +931,9 @@ public class FSDirectory implements Closeable {
       toDst = srcChild;
     } else {
       withCount.getReferredINode().setLocalName(dstChildName);
-      Snapshot dstSnapshot = dstIIP.getLatestSnapshot();
+      int dstSnapshotId = dstIIP.getLatestSnapshotId();
       final INodeReference.DstReference ref = new INodeReference.DstReference(
-          dstIIP.getINode(-2).asDirectory(), withCount,
-          dstSnapshot == null ? Snapshot.INVALID_ID : dstSnapshot.getId());
+          dstIIP.getINode(-2).asDirectory(), withCount, dstSnapshotId);
       toDst = ref;
     }
@@ -952,9 +947,9 @@ public class FSDirectory implements Closeable {
     }

     final INode srcParent = srcIIP.getINode(-2);
-    srcParent.updateModificationTime(timestamp, srcIIP.getLatestSnapshot());
+    srcParent.updateModificationTime(timestamp, srcIIP.getLatestSnapshotId());
     dstParent = dstIIP.getINode(-2);
-    dstParent.updateModificationTime(timestamp, dstIIP.getLatestSnapshot());
+    dstParent.updateModificationTime(timestamp, dstIIP.getLatestSnapshotId());

     // update moved lease with new filename
     getFSNamesystem().unprotectedChangeLease(src, dst);
@@ -964,8 +959,8 @@ public class FSDirectory implements Closeable {
         undoRemoveDst = false;
         BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
         List<INode> removedINodes = new ChunkedArrayList<INode>();
-        filesDeleted = removedDst.cleanSubtree(null,
-            dstIIP.getLatestSnapshot(), collectedBlocks, removedINodes, true)
+        filesDeleted = removedDst.cleanSubtree(Snapshot.CURRENT_STATE_ID,
+            dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes, true)
             .get(Quota.NAMESPACE);
         getFSNamesystem().removePathAndBlocks(src, collectedBlocks,
             removedINodes);
@@ -981,7 +976,7 @@ public class FSDirectory implements Closeable {
       if (isSrcInSnapshot) {
         // get the counts after rename
         Quota.Counts newSrcCounts = srcChild.computeQuotaUsage(
-            Quota.Counts.newInstance(), false, Snapshot.INVALID_ID);
+            Quota.Counts.newInstance(), false);
         newSrcCounts.subtract(oldSrcCounts);
         srcParent.addSpaceConsumed(newSrcCounts.get(Quota.NAMESPACE),
             newSrcCounts.get(Quota.DISKSPACE), false);
@@ -1012,8 +1007,7 @@ public class FSDirectory implements Closeable {
       }
       if (srcParent.isWithSnapshot()) {
-        srcParent.undoRename4ScrParent(oldSrcChild.asReference(), srcChild,
-            srcIIP.getLatestSnapshot());
+        srcParent.undoRename4ScrParent(oldSrcChild.asReference(), srcChild);
       } else {
         // srcParent is not an INodeDirectoryWithSnapshot, we only need to add
         // the srcChild back
@@ -1024,7 +1018,7 @@ public class FSDirectory implements Closeable {
       // Rename failed - restore dst
       if (dstParent.isDirectory() && dstParent.asDirectory().isWithSnapshot()) {
         dstParent.asDirectory().undoRename4DstParent(removedDst,
-            dstIIP.getLatestSnapshot());
+            dstIIP.getLatestSnapshotId());
       } else {
         addLastINodeNoQuotaCheck(dstIIP, removedDst);
       }
@@ -1088,7 +1082,7 @@ public class FSDirectory implements Closeable {
       updateCount(iip, 0, dsDelta, true);
     }
-    file = file.setFileReplication(replication, iip.getLatestSnapshot(),
+    file = file.setFileReplication(replication, iip.getLatestSnapshotId(),
         inodeMap);
     final short newBR = file.getBlockReplication();
@@ -1155,7 +1149,7 @@ public class FSDirectory implements Closeable {
     if (inode == null) {
       throw new FileNotFoundException("File does not exist: " + src);
     }
-    inode.setPermission(permissions, inodesInPath.getLatestSnapshot());
+    inode.setPermission(permissions, inodesInPath.getLatestSnapshotId());
   }

   void setOwner(String src, String username, String groupname)
@@ -1180,10 +1174,10 @@ public class FSDirectory implements Closeable {
       throw new FileNotFoundException("File does not exist: " + src);
     }
     if (username != null) {
-      inode = inode.setUser(username, inodesInPath.getLatestSnapshot());
+      inode = inode.setUser(username, inodesInPath.getLatestSnapshotId());
     }
     if (groupname != null) {
-      inode.setGroup(groupname, inodesInPath.getLatestSnapshot());
+      inode.setGroup(groupname, inodesInPath.getLatestSnapshotId());
     }
   }
@@ -1225,12 +1219,12 @@ public class FSDirectory implements Closeable {
     final INode[] trgINodes = trgIIP.getINodes();
     final INodeFile trgInode = trgIIP.getLastINode().asFile();
     INodeDirectory trgParent = trgINodes[trgINodes.length-2].asDirectory();
-    final Snapshot trgLatestSnapshot = trgIIP.getLatestSnapshot();
+    final int trgLatestSnapshot = trgIIP.getLatestSnapshotId();

     final INodeFile [] allSrcInodes = new INodeFile[srcs.length];
     for(int i = 0; i < srcs.length; i++) {
       final INodesInPath iip = getINodesInPath4Write(srcs[i]);
-      final Snapshot latest = iip.getLatestSnapshot();
+      final int latest = iip.getLatestSnapshotId();
       final INode inode = iip.getLastINode();

       // check if the file in the latest snapshot
@@ -1354,7 +1348,7 @@ public class FSDirectory implements Closeable {
       //not found or not a directory
       return false;
     }
-    final Snapshot s = inodesInPath.getPathSnapshot();
+    final int s = inodesInPath.getPathSnapshotId();
     return !inode.asDirectory().getChildrenList(s).isEmpty();
   } finally {
     readUnlock();
@@ -1408,7 +1402,7 @@ public class FSDirectory implements Closeable {
     }

     // record modification
-    final Snapshot latestSnapshot = iip.getLatestSnapshot();
+    final int latestSnapshot = iip.getLatestSnapshotId();
     targetNode = targetNode.recordModification(latestSnapshot);
     iip.setLastINode(targetNode);
@@ -1429,8 +1423,8 @@ public class FSDirectory implements Closeable {
     if (!targetNode.isInLatestSnapshot(latestSnapshot)) {
       targetNode.destroyAndCollectBlocks(collectedBlocks, removedINodes);
     } else {
-      Quota.Counts counts = targetNode.cleanSubtree(null, latestSnapshot,
-          collectedBlocks, removedINodes, true);
+      Quota.Counts counts = targetNode.cleanSubtree(Snapshot.CURRENT_STATE_ID,
+          latestSnapshot, collectedBlocks, removedINodes, true);
       parent.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
           -counts.get(Quota.DISKSPACE), true);
       removed = counts.get(Quota.NAMESPACE);
@@ -1467,7 +1461,7 @@ public class FSDirectory implements Closeable {
         }
       }
     }
-    for (INode child : targetDir.getChildrenList(null)) {
+    for (INode child : targetDir.getChildrenList(Snapshot.CURRENT_STATE_ID)) {
       checkSnapshot(child, snapshottableDirs);
     }
   }
@@ -1491,7 +1485,7 @@ public class FSDirectory implements Closeable {
       return getSnapshotsListing(srcs, startAfter);
     }
     final INodesInPath inodesInPath = rootDir.getLastINodeInPath(srcs, true);
-    final Snapshot snapshot = inodesInPath.getPathSnapshot();
+    final int snapshot = inodesInPath.getPathSnapshotId();
     final INode targetNode = inodesInPath.getINode(0);
     if (targetNode == null)
       return null;
@@ -1543,7 +1537,8 @@ public class FSDirectory implements Closeable {
     final HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
     for (int i = 0; i < numOfListing; i++) {
       Root sRoot = snapshots.get(i + skipSize).getRoot();
-      listing[i] = createFileStatus(sRoot.getLocalNameBytes(), sRoot, null);
+      listing[i] = createFileStatus(sRoot.getLocalNameBytes(), sRoot,
+          Snapshot.CURRENT_STATE_ID);
     }
     return new DirectoryListing(
         listing, snapshots.size() - skipSize - numOfListing);
@@ -1566,7 +1561,7 @@ public class FSDirectory implements Closeable {
     final INodesInPath inodesInPath = rootDir.getLastINodeInPath(srcs, resolveLink);
     final INode i = inodesInPath.getINode(0);
     return i == null? null: createFileStatus(HdfsFileStatus.EMPTY_NAME, i,
-        inodesInPath.getPathSnapshot());
+        inodesInPath.getPathSnapshotId());
   } finally {
     readUnlock();
   }
@@ -2129,7 +2124,7 @@ public class FSDirectory implements Closeable {
     }
     final INodeDirectory parent = pathComponents[pos-1].asDirectory();
-    final int count = parent.getChildrenList(null).size();
+    final int count = parent.getChildrenList(Snapshot.CURRENT_STATE_ID).size();
     if (count >= maxDirItems) {
       final MaxDirectoryItemsExceededException e
           = new MaxDirectoryItemsExceededException(maxDirItems, count);
@@ -2193,7 +2188,7 @@ public class FSDirectory implements Closeable {
     final INodeDirectory parent = inodes[pos-1].asDirectory();
     boolean added = false;
     try {
-      added = parent.addChild(child, true, iip.getLatestSnapshot());
+      added = parent.addChild(child, true, iip.getLatestSnapshotId());
     } catch (QuotaExceededException e) {
       updateCountNoQuotaCheck(iip, pos,
           -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
@@ -2228,7 +2223,7 @@ public class FSDirectory implements Closeable {
    */
   private long removeLastINode(final INodesInPath iip)
       throws QuotaExceededException {
-    final Snapshot latestSnapshot = iip.getLatestSnapshot();
+    final int latestSnapshot = iip.getLatestSnapshotId();
     final INode last = iip.getLastINode();
     final INodeDirectory parent = iip.getINode(-2).asDirectory();
     if (!parent.removeChild(last, latestSnapshot)) {
@@ -2382,7 +2377,7 @@ public class FSDirectory implements Closeable {
       return null;
     }

-    final Snapshot latest = iip.getLatestSnapshot();
+    final int latest = iip.getLatestSnapshotId();
     dirNode = dirNode.recordModification(latest);
     dirNode.setQuota(nsQuota, dsQuota);
     return dirNode;
@@ -2425,11 +2420,11 @@ public class FSDirectory implements Closeable {
    * Sets the access time on the file/directory. Logs it in the transaction log.
    */
   void setTimes(String src, INode inode, long mtime, long atime, boolean force,
-      Snapshot latest) throws QuotaExceededException {
+      int latestSnapshotId) throws QuotaExceededException {
     boolean status = false;
     writeLock();
     try {
-      status = unprotectedSetTimes(inode, mtime, atime, force, latest);
+      status = unprotectedSetTimes(inode, mtime, atime, force, latestSnapshotId);
     } finally {
       writeUnlock();
     }
@@ -2443,11 +2438,11 @@ public class FSDirectory implements Closeable {
     assert hasWriteLock();
     final INodesInPath i = getLastINodeInPath(src);
     return unprotectedSetTimes(i.getLastINode(), mtime, atime, force,
-        i.getLatestSnapshot());
+        i.getLatestSnapshotId());
   }

   private boolean unprotectedSetTimes(INode inode, long mtime,
-      long atime, boolean force, Snapshot latest) throws QuotaExceededException {
+      long atime, boolean force, int latest) throws QuotaExceededException {
     assert hasWriteLock();
     boolean status = false;
     if (mtime != -1) {
@@ -2455,7 +2450,7 @@ public class FSDirectory implements Closeable {
       status = true;
     }
     if (atime != -1) {
-      long inodeTime = inode.getAccessTime(null);
+      long inodeTime = inode.getAccessTime();

       // if the last access time update was within the last precision interval, then
       // no need to store access time
@@ -2495,7 +2490,7 @@ public class FSDirectory implements Closeable {
    * @throws IOException if any error occurs
    */
   private HdfsFileStatus createFileStatus(byte[] path, INode node,
-      boolean needLocation, Snapshot snapshot) throws IOException {
+      boolean needLocation, int snapshot) throws IOException {
     if (needLocation) {
       return createLocatedFileStatus(path, node, snapshot);
     } else {
@@ -2506,7 +2501,7 @@ public class FSDirectory implements Closeable {
    * Create FileStatus by file INode
    */
   HdfsFileStatus createFileStatus(byte[] path, INode node,
-      Snapshot snapshot) {
+      int snapshot) {
     long size = 0;     // length is zero for directories
     short replication = 0;
     long blocksize = 0;
@@ -2539,7 +2534,7 @@ public class FSDirectory implements Closeable {
    * Create FileStatus with location info by file INode
    */
   private HdfsLocatedFileStatus createLocatedFileStatus(byte[] path,
-      INode node, Snapshot snapshot) throws IOException {
+      INode node, int snapshot) throws IOException {
     assert hasReadLock();
     long size = 0; // length is zero for directories
     short replication = 0;
@@ -2551,7 +2546,7 @@ public class FSDirectory implements Closeable {
       replication = fileNode.getFileReplication(snapshot);
       blocksize = fileNode.getPreferredBlockSize();

-      final boolean inSnapshot = snapshot != null;
+      final boolean inSnapshot = snapshot != Snapshot.CURRENT_STATE_ID;
       final boolean isUc = inSnapshot ? false : fileNode.isUnderConstruction();
       final long fileSize = !inSnapshot && isUc ?
           fileNode.computeFileSizeNotIncludingLastUcBlock() : size;
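A recurring pattern across the FSDirectory hunks above: a nullable Snapshot argument is replaced by a plain int id, with a reserved sentinel (Snapshot.CURRENT_STATE_ID) meaning "the current, un-snapshotted state" instead of null. A minimal sketch of that pattern; the class and the constant's value here are illustrative, not taken from the patch:

public class SnapshotIdPattern {
  /** Reserved id meaning "read the current state, not any snapshot". */
  public static final int CURRENT_STATE_ID = Integer.MAX_VALUE - 1;

  static String describe(int snapshotId) {
    // The sentinel makes the "no snapshot" case explicit and unboxed,
    // removing the null checks the old Snapshot-typed API needed.
    return snapshotId == CURRENT_STATE_ID
        ? "current state"
        : "snapshot #" + snapshotId;
  }

  public static void main(String[] args) {
    System.out.println(describe(CURRENT_STATE_ID)); // current state
    System.out.println(describe(7));                // snapshot #7
  }
}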
@@ -24,14 +24,12 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.util.Arrays;
 import java.util.EnumMap;
-import java.util.EnumSet;
 import java.util.List;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.CacheFlag;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -79,6 +77,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
@@ -327,7 +326,7 @@ public class FSEditLogLoader {
       // add the op into retry cache if necessary
       if (toAddRetryCache) {
         HdfsFileStatus stat = fsNamesys.dir.createFileStatus(
-            HdfsFileStatus.EMPTY_NAME, newFile, null);
+            HdfsFileStatus.EMPTY_NAME, newFile, Snapshot.CURRENT_STATE_ID);
         fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId,
             addCloseOp.rpcCallId, stat);
       }
@@ -340,7 +339,7 @@ public class FSEditLogLoader {
       }
       LocatedBlock lb = fsNamesys.prepareFileForWrite(addCloseOp.path,
           oldFile, addCloseOp.clientName, addCloseOp.clientMachine, null,
-          false, iip.getLatestSnapshot(), false);
+          false, iip.getLatestSnapshotId(), false);
       newFile = INodeFile.valueOf(fsDir.getINode(addCloseOp.path),
           addCloseOp.path, true);
@@ -356,8 +355,8 @@ public class FSEditLogLoader {
       // update the block list.

       // Update the salient file attributes.
-      newFile.setAccessTime(addCloseOp.atime, null);
-      newFile.setModificationTime(addCloseOp.mtime, null);
+      newFile.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID);
+      newFile.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID);
       updateBlocks(fsDir, addCloseOp, newFile);
       break;
     }
@@ -375,8 +374,8 @@ public class FSEditLogLoader {
       final INodeFile file = INodeFile.valueOf(iip.getINode(0), addCloseOp.path);

       // Update the salient file attributes.
-      file.setAccessTime(addCloseOp.atime, null);
-      file.setModificationTime(addCloseOp.mtime, null);
+      file.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID);
+      file.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID);
       updateBlocks(fsDir, addCloseOp, file);

       // Now close the file
@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
 import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
@@ -405,60 +406,64 @@ public class FSImage implements Closeable {
     // Directories that don't have previous state do not rollback
     boolean canRollback = false;
     FSImage prevState = new FSImage(conf);
+    try {
       prevState.getStorage().layoutVersion = HdfsConstants.LAYOUT_VERSION;
       for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
         StorageDirectory sd = it.next();
         File prevDir = sd.getPreviousDir();
         if (!prevDir.exists()) {  // use current directory then
           LOG.info("Storage directory " + sd.getRoot()
               + " does not contain previous fs state.");
           // read and verify consistency with other directories
           storage.readProperties(sd);
           continue;
         }

         // read and verify consistency of the prev dir
         prevState.getStorage().readPreviousVersionProperties(sd);

         if (prevState.getLayoutVersion() != HdfsConstants.LAYOUT_VERSION) {
           throw new IOException(
               "Cannot rollback to storage version " +
               prevState.getLayoutVersion() +
               " using this version of the NameNode, which uses storage version " +
               HdfsConstants.LAYOUT_VERSION + ". " +
               "Please use the previous version of HDFS to perform the rollback.");
         }
         canRollback = true;
       }
       if (!canRollback)
         throw new IOException("Cannot rollback. None of the storage "
             + "directories contain previous fs state.");

       // Now that we know all directories are going to be consistent
       // Do rollback for each directory containing previous state
       for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
         StorageDirectory sd = it.next();
         File prevDir = sd.getPreviousDir();
         if (!prevDir.exists())
           continue;

         LOG.info("Rolling back storage directory " + sd.getRoot()
             + ".\n   new LV = " + prevState.getStorage().getLayoutVersion()
             + "; new CTime = " + prevState.getStorage().getCTime());
         File tmpDir = sd.getRemovedTmp();
         assert !tmpDir.exists() : "removed.tmp directory must not exist.";
         // rename current to tmp
         File curDir = sd.getCurrentDir();
         assert curDir.exists() : "Current directory must exist.";
         NNStorage.rename(curDir, tmpDir);
         // rename previous to current
         NNStorage.rename(prevDir, curDir);
         // delete tmp dir
         NNStorage.deleteDir(tmpDir);
         LOG.info("Rollback of " + sd.getRoot() + " is complete.");
       }
       isUpgradeFinalized = true;
+    } finally {
+      prevState.close();
+    }
   }

   private void doFinalize(StorageDirectory sd) throws IOException {
@@ -766,7 +771,7 @@ public class FSImage implements Closeable {
     dir.computeQuotaUsage4CurrentDirectory(counts);
-    for (INode child : dir.getChildrenList(null)) {
+    for (INode child : dir.getChildrenList(Snapshot.CURRENT_STATE_ID)) {
       if (child.isDirectory()) {
         updateCountForQuotaRecursively(child.asDirectory(), counts);
       } else {
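The rollback above boils down to a three-step directory swap: rename current to removed.tmp, rename previous to current, then delete removed.tmp, so no single failure point loses both states. A minimal sketch of the same sequence with plain java.io.File; the paths are illustrative, and in HDFS the renames go through NNStorage.rename/deleteDir, which add logging and error handling:

import java.io.File;
import java.io.IOException;

public class RollbackSketch {
  static void rollback(File storageRoot) throws IOException {
    File current = new File(storageRoot, "current");
    File previous = new File(storageRoot, "previous");
    File tmp = new File(storageRoot, "removed.tmp");
    if (!previous.exists()) {
      throw new IOException("no previous state to roll back to");
    }
    // Park the current state first so the next rename cannot clobber it.
    if (!current.renameTo(tmp)) {
      throw new IOException("rename " + current + " -> " + tmp + " failed");
    }
    // Promote the saved state.
    if (!previous.renameTo(current)) {
      throw new IOException("rename " + previous + " -> " + current + " failed");
    }
    // Only now is it safe to discard the old current state.
    deleteRecursively(tmp);
  }

  static void deleteRecursively(File f) throws IOException {
    File[] children = f.listFiles();
    if (children != null) {
      for (File c : children) {
        deleteRecursively(c);
      }
    }
    if (!f.delete()) {
      throw new IOException("could not delete " + f);
    }
  }
}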
@@ -1111,7 +1111,8 @@ public class FSImageFormat {
       return;
     }

-    final ReadOnlyList<INode> children = current.getChildrenList(null);
+    final ReadOnlyList<INode> children = current
+        .getChildrenList(Snapshot.CURRENT_STATE_ID);
     int dirNum = 0;
     List<INodeDirectory> snapshotDirs = null;
     DirectoryWithSnapshotFeature sf = current.getDirectoryWithSnapshotFeature();
@@ -176,7 +176,15 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
-import org.apache.hadoop.hdfs.server.blockmanagement.*;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.OutOfV1GenerationStampsException;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
@@ -1631,11 +1639,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         if (isReadOp) {
           continue;
         }
-        dir.setTimes(src, inode, -1, now, false, iip.getLatestSnapshot());
+        dir.setTimes(src, inode, -1, now, false, iip.getLatestSnapshotId());
       }
     }
     final long fileSize = iip.isSnapshot() ?
-        inode.computeFileSize(iip.getPathSnapshot())
+        inode.computeFileSize(iip.getPathSnapshotId())
         : inode.computeFileSizeNotIncludingLastUcBlock();
     boolean isUc = inode.isUnderConstruction();
     if (iip.isSnapshot()) {
@@ -1883,7 +1891,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       final INodesInPath iip = dir.getINodesInPath4Write(src);
       final INode inode = iip.getLastINode();
       if (inode != null) {
-        dir.setTimes(src, inode, mtime, atime, true, iip.getLatestSnapshot());
+        dir.setTimes(src, inode, mtime, atime, true, iip.getLatestSnapshotId());
         resultingStat = getAuditFileInfo(src, false);
       } else {
         throw new FileNotFoundException("File/Directory " + src + " does not exist.");
@@ -2279,7 +2287,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       final DatanodeDescriptor clientNode =
           blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
       return prepareFileForWrite(src, myFile, holder, clientMachine, clientNode,
-          true, iip.getLatestSnapshot(), logRetryCache);
+          true, iip.getLatestSnapshotId(), logRetryCache);
     } catch (IOException ie) {
       NameNode.stateChangeLog.warn("DIR* NameSystem.append: " +ie.getMessage());
       throw ie;
@@ -2304,7 +2312,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    */
   LocatedBlock prepareFileForWrite(String src, INodeFile file,
       String leaseHolder, String clientMachine, DatanodeDescriptor clientNode,
-      boolean writeToEditLog, Snapshot latestSnapshot, boolean logRetryCache)
+      boolean writeToEditLog, int latestSnapshot, boolean logRetryCache)
       throws IOException {
     file = file.recordModification(latestSnapshot);
     final INodeFile cons = file.toUnderConstruction(leaseHolder, clientMachine,
@@ -2939,7 +2947,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     }

     finalizeINodeFileUnderConstruction(src, pendingFile,
-        iip.getLatestSnapshot());
+        iip.getLatestSnapshotId());

     return true;
   }
@@ -3648,7 +3656,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     // then reap lease immediately and close the file.
     if(nrCompleteBlocks == nrBlocks) {
       finalizeINodeFileUnderConstruction(src, pendingFile,
-          iip.getLatestSnapshot());
+          iip.getLatestSnapshotId());
       NameNode.stateChangeLog.warn("BLOCK*"
           + " internalReleaseLease: All existing blocks are COMPLETE,"
           + " lease removed, file closed.");
@@ -3697,7 +3705,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     if(penultimateBlockMinReplication &&
         blockManager.checkMinReplication(lastBlock)) {
       finalizeINodeFileUnderConstruction(src, pendingFile,
-          iip.getLatestSnapshot());
+          iip.getLatestSnapshotId());
       NameNode.stateChangeLog.warn("BLOCK*"
           + " internalReleaseLease: Committed blocks are minimally replicated,"
           + " lease removed, file closed.");
@@ -3728,7 +3736,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       // We can remove this block and close the file.
       pendingFile.removeLastBlock(lastBlock);
       finalizeINodeFileUnderConstruction(src, pendingFile,
-          iip.getLatestSnapshot());
+          iip.getLatestSnapshotId());
       NameNode.stateChangeLog.warn("BLOCK* internalReleaseLease: "
           + "Removed empty last block and closed file.");
       return true;
@@ -3789,7 +3797,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   }

   private void finalizeINodeFileUnderConstruction(String src,
-      INodeFile pendingFile, Snapshot latestSnapshot) throws IOException,
+      INodeFile pendingFile, int latestSnapshot) throws IOException,
       UnresolvedLinkException {
     assert hasWriteLock();
     FileUnderConstructionFeature uc = pendingFile.getFileUnderConstructionFeature();
@@ -3989,7 +3997,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     //remove lease, close file
     finalizeINodeFileUnderConstruction(src, pendingFile,
-        Snapshot.findLatestSnapshot(pendingFile, null));
+        Snapshot.findLatestSnapshot(pendingFile, Snapshot.CURRENT_STATE_ID));
     return src;
   }
@@ -7114,7 +7122,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       getEditLog().logSync();
     }
     if (isAuditEnabled() && isExternalInvocation()) {
-      logAuditEvent(success, "addCacheDirective", null, null, null);
+      logAuditEvent(success, "modifyCacheDirective", null, null, null);
     }
     RetryCache.setState(cacheEntry, success);
   }
@ -17,7 +17,6 @@
*/ */
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collections; import java.util.Collections;
import java.util.HashSet; import java.util.HashSet;
@ -29,7 +28,6 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
@ -142,54 +140,54 @@ class FSPermissionChecker {
// check if (parentAccess != null) && file exists, then check sb // check if (parentAccess != null) && file exists, then check sb
// If resolveLink, the check is performed on the link target. // If resolveLink, the check is performed on the link target.
final INodesInPath inodesInPath = root.getINodesInPath(path, resolveLink); final INodesInPath inodesInPath = root.getINodesInPath(path, resolveLink);
final Snapshot snapshot = inodesInPath.getPathSnapshot(); final int snapshotId = inodesInPath.getPathSnapshotId();
final INode[] inodes = inodesInPath.getINodes(); final INode[] inodes = inodesInPath.getINodes();
int ancestorIndex = inodes.length - 2; int ancestorIndex = inodes.length - 2;
for(; ancestorIndex >= 0 && inodes[ancestorIndex] == null; for(; ancestorIndex >= 0 && inodes[ancestorIndex] == null;
ancestorIndex--); ancestorIndex--);
checkTraverse(inodes, ancestorIndex, snapshot); checkTraverse(inodes, ancestorIndex, snapshotId);
final INode last = inodes[inodes.length - 1]; final INode last = inodes[inodes.length - 1];
if (parentAccess != null && parentAccess.implies(FsAction.WRITE) if (parentAccess != null && parentAccess.implies(FsAction.WRITE)
&& inodes.length > 1 && last != null) { && inodes.length > 1 && last != null) {
checkStickyBit(inodes[inodes.length - 2], last, snapshot); checkStickyBit(inodes[inodes.length - 2], last, snapshotId);
} }
if (ancestorAccess != null && inodes.length > 1) { if (ancestorAccess != null && inodes.length > 1) {
check(inodes, ancestorIndex, snapshot, ancestorAccess); check(inodes, ancestorIndex, snapshotId, ancestorAccess);
} }
if (parentAccess != null && inodes.length > 1) { if (parentAccess != null && inodes.length > 1) {
check(inodes, inodes.length - 2, snapshot, parentAccess); check(inodes, inodes.length - 2, snapshotId, parentAccess);
} }
if (access != null) { if (access != null) {
check(last, snapshot, access); check(last, snapshotId, access);
} }
if (subAccess != null) { if (subAccess != null) {
checkSubAccess(last, snapshot, subAccess); checkSubAccess(last, snapshotId, subAccess);
} }
if (doCheckOwner) { if (doCheckOwner) {
checkOwner(last, snapshot); checkOwner(last, snapshotId);
} }
} }
/** Guarded by {@link FSNamesystem#readLock()} */ /** Guarded by {@link FSNamesystem#readLock()} */
private void checkOwner(INode inode, Snapshot snapshot private void checkOwner(INode inode, int snapshotId
) throws AccessControlException { ) throws AccessControlException {
if (inode != null && user.equals(inode.getUserName(snapshot))) { if (inode != null && user.equals(inode.getUserName(snapshotId))) {
return; return;
} }
throw new AccessControlException("Permission denied"); throw new AccessControlException("Permission denied");
} }
/** Guarded by {@link FSNamesystem#readLock()} */ /** Guarded by {@link FSNamesystem#readLock()} */
private void checkTraverse(INode[] inodes, int last, Snapshot snapshot private void checkTraverse(INode[] inodes, int last, int snapshotId
) throws AccessControlException { ) throws AccessControlException {
for(int j = 0; j <= last; j++) { for(int j = 0; j <= last; j++) {
check(inodes[j], snapshot, FsAction.EXECUTE); check(inodes[j], snapshotId, FsAction.EXECUTE);
} }
} }
/** Guarded by {@link FSNamesystem#readLock()} */ /** Guarded by {@link FSNamesystem#readLock()} */
private void checkSubAccess(INode inode, Snapshot snapshot, FsAction access private void checkSubAccess(INode inode, int snapshotId, FsAction access
) throws AccessControlException { ) throws AccessControlException {
if (inode == null || !inode.isDirectory()) { if (inode == null || !inode.isDirectory()) {
return; return;
@ -198,9 +196,9 @@ class FSPermissionChecker {
Stack<INodeDirectory> directories = new Stack<INodeDirectory>(); Stack<INodeDirectory> directories = new Stack<INodeDirectory>();
for(directories.push(inode.asDirectory()); !directories.isEmpty(); ) { for(directories.push(inode.asDirectory()); !directories.isEmpty(); ) {
INodeDirectory d = directories.pop(); INodeDirectory d = directories.pop();
check(d, snapshot, access); check(d, snapshotId, access);
for(INode child : d.getChildrenList(snapshot)) { for(INode child : d.getChildrenList(snapshotId)) {
if (child.isDirectory()) { if (child.isDirectory()) {
directories.push(child.asDirectory()); directories.push(child.asDirectory());
} }
@ -209,23 +207,23 @@ class FSPermissionChecker {
} }
/** Guarded by {@link FSNamesystem#readLock()} */ /** Guarded by {@link FSNamesystem#readLock()} */
private void check(INode[] inodes, int i, Snapshot snapshot, FsAction access private void check(INode[] inodes, int i, int snapshotId, FsAction access
) throws AccessControlException { ) throws AccessControlException {
check(i >= 0? inodes[i]: null, snapshot, access); check(i >= 0? inodes[i]: null, snapshotId, access);
} }
/** Guarded by {@link FSNamesystem#readLock()} */ /** Guarded by {@link FSNamesystem#readLock()} */
private void check(INode inode, Snapshot snapshot, FsAction access private void check(INode inode, int snapshotId, FsAction access
) throws AccessControlException { ) throws AccessControlException {
if (inode == null) { if (inode == null) {
return; return;
} }
FsPermission mode = inode.getFsPermission(snapshot); FsPermission mode = inode.getFsPermission(snapshotId);
if (user.equals(inode.getUserName(snapshot))) { //user class if (user.equals(inode.getUserName(snapshotId))) { //user class
if (mode.getUserAction().implies(access)) { return; } if (mode.getUserAction().implies(access)) { return; }
} }
else if (groups.contains(inode.getGroupName(snapshot))) { //group class else if (groups.contains(inode.getGroupName(snapshotId))) { //group class
if (mode.getGroupAction().implies(access)) { return; } if (mode.getGroupAction().implies(access)) { return; }
} }
else { //other class else { //other class
@ -236,19 +234,19 @@ class FSPermissionChecker {
} }
/** Guarded by {@link FSNamesystem#readLock()} */ /** Guarded by {@link FSNamesystem#readLock()} */
private void checkStickyBit(INode parent, INode inode, Snapshot snapshot private void checkStickyBit(INode parent, INode inode, int snapshotId
) throws AccessControlException { ) throws AccessControlException {
if(!parent.getFsPermission(snapshot).getStickyBit()) { if(!parent.getFsPermission(snapshotId).getStickyBit()) {
return; return;
} }
// If this user is the directory owner, return // If this user is the directory owner, return
if(parent.getUserName(snapshot).equals(user)) { if(parent.getUserName(snapshotId).equals(user)) {
return; return;
} }
// if this user is the file owner, return // if this user is the file owner, return
if(inode.getUserName(snapshot).equals(user)) { if(inode.getUserName(snapshotId).equals(user)) {
return; return;
} }
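
`checkStickyBit` keeps the same rule, just keyed by snapshot id: in a sticky directory, only the directory owner or the file owner may act on the entry. A small sketch under the same stand-in types (illustrative, not the real signatures):

```java
// Sketch of the sticky-bit rule with snapshot-id attribute reads.
class StickyBitSketch {
  interface Attrs {
    boolean stickyBit(int snapshotId);
    String userName(int snapshotId);
  }

  /** True when the sticky bit does not block this user. */
  static boolean allowedUnderStickyBit(Attrs parent, Attrs inode,
      int snapshotId, String user) {
    if (!parent.stickyBit(snapshotId)) {
      return true; // no sticky bit set: nothing extra to check
    }
    if (parent.userName(snapshotId).equals(user)) {
      return true; // directory owner may always act
    }
    return inode.userName(snapshotId).equals(user); // else file owner only
  }
}
```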

View File

@ -70,98 +70,101 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
} }
/** Get the {@link PermissionStatus} */ /** Get the {@link PermissionStatus} */
abstract PermissionStatus getPermissionStatus(Snapshot snapshot); abstract PermissionStatus getPermissionStatus(int snapshotId);
/** The same as getPermissionStatus(null). */ /** The same as getPermissionStatus(null). */
final PermissionStatus getPermissionStatus() { final PermissionStatus getPermissionStatus() {
return getPermissionStatus(null); return getPermissionStatus(Snapshot.CURRENT_STATE_ID);
} }
/** /**
* @param snapshot * @param snapshotId
* if it is not null, get the result from the given snapshot; * if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
* otherwise, get the result from the current inode. * from the given snapshot; otherwise, get the result from the
* current inode.
* @return user name * @return user name
*/ */
abstract String getUserName(Snapshot snapshot); abstract String getUserName(int snapshotId);
/** The same as getUserName(null). */ /** The same as getUserName(Snapshot.CURRENT_STATE_ID). */
@Override @Override
public final String getUserName() { public final String getUserName() {
return getUserName(null); return getUserName(Snapshot.CURRENT_STATE_ID);
} }
/** Set user */ /** Set user */
abstract void setUser(String user); abstract void setUser(String user);
/** Set user */ /** Set user */
final INode setUser(String user, Snapshot latest) final INode setUser(String user, int latestSnapshotId)
throws QuotaExceededException { throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latest); final INode nodeToUpdate = recordModification(latestSnapshotId);
nodeToUpdate.setUser(user); nodeToUpdate.setUser(user);
return nodeToUpdate; return nodeToUpdate;
} }
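
The change in this file is deliberately mechanical: every nullable `Snapshot` parameter becomes an `int`, with `Snapshot.CURRENT_STATE_ID` taking over the old `null` meaning of "read the live inode". A minimal sketch of the resulting calling convention; the sentinel values and the lookup body are assumptions for illustration only:

```java
// Sketch of the null-Snapshot -> int-sentinel migration.
class SnapshotIdConvention {
  static final int CURRENT_STATE_ID = Integer.MAX_VALUE; // was: null Snapshot
  static final int NO_SNAPSHOT_ID = -1;                  // was: no prior snapshot

  private String currentUser = "hdfs";

  // After the patch: one primitive parameter, compared against sentinels.
  String getUserName(int snapshotId) {
    if (snapshotId == CURRENT_STATE_ID) {
      return currentUser;                  // read the live inode
    }
    return userNameInSnapshot(snapshotId); // read the snapshot copy
  }

  // The zero-argument overload forwards the sentinel instead of null.
  String getUserName() {
    return getUserName(CURRENT_STATE_ID);
  }

  private String userNameInSnapshot(int snapshotId) {
    return "user@" + snapshotId; // placeholder for the diff-list lookup
  }
}
```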
/** /**
* @param snapshot * @param snapshotId
* if it is not null, get the result from the given snapshot; * if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
* otherwise, get the result from the current inode. * from the given snapshot; otherwise, get the result from the
* current inode.
* @return group name * @return group name
*/ */
abstract String getGroupName(Snapshot snapshot); abstract String getGroupName(int snapshotId);
/** The same as getGroupName(null). */ /** The same as getGroupName(Snapshot.CURRENT_STATE_ID). */
@Override @Override
public final String getGroupName() { public final String getGroupName() {
return getGroupName(null); return getGroupName(Snapshot.CURRENT_STATE_ID);
} }
/** Set group */ /** Set group */
abstract void setGroup(String group); abstract void setGroup(String group);
/** Set group */ /** Set group */
final INode setGroup(String group, Snapshot latest) final INode setGroup(String group, int latestSnapshotId)
throws QuotaExceededException { throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latest); final INode nodeToUpdate = recordModification(latestSnapshotId);
nodeToUpdate.setGroup(group); nodeToUpdate.setGroup(group);
return nodeToUpdate; return nodeToUpdate;
} }
/** /**
* @param snapshot * @param snapshotId
* if it is not null, get the result from the given snapshot; * if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
* otherwise, get the result from the current inode. * from the given snapshot; otherwise, get the result from the
* current inode.
* @return permission. * @return permission.
*/ */
abstract FsPermission getFsPermission(Snapshot snapshot); abstract FsPermission getFsPermission(int snapshotId);
/** The same as getFsPermission(null). */ /** The same as getFsPermission(Snapshot.CURRENT_STATE_ID). */
@Override @Override
public final FsPermission getFsPermission() { public final FsPermission getFsPermission() {
return getFsPermission(null); return getFsPermission(Snapshot.CURRENT_STATE_ID);
} }
/** Set the {@link FsPermission} of this {@link INode} */ /** Set the {@link FsPermission} of this {@link INode} */
abstract void setPermission(FsPermission permission); abstract void setPermission(FsPermission permission);
/** Set the {@link FsPermission} of this {@link INode} */ /** Set the {@link FsPermission} of this {@link INode} */
INode setPermission(FsPermission permission, Snapshot latest) INode setPermission(FsPermission permission, int latestSnapshotId)
throws QuotaExceededException { throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latest); final INode nodeToUpdate = recordModification(latestSnapshotId);
nodeToUpdate.setPermission(permission); nodeToUpdate.setPermission(permission);
return nodeToUpdate; return nodeToUpdate;
} }
/** /**
* @return if the given snapshot is null, return this; * @return if the given snapshot id is {@link Snapshot#CURRENT_STATE_ID},
* otherwise return the corresponding snapshot inode. * return this; otherwise return the corresponding snapshot inode.
*/ */
public INodeAttributes getSnapshotINode(final Snapshot snapshot) { public INodeAttributes getSnapshotINode(final int snapshotId) {
return this; return this;
} }
/** Is this inode in the latest snapshot? */ /** Is this inode in the latest snapshot? */
public final boolean isInLatestSnapshot(final Snapshot latest) { public final boolean isInLatestSnapshot(final int latestSnapshotId) {
if (latest == null) { if (latestSnapshotId == Snapshot.CURRENT_STATE_ID) {
return false; return false;
} }
// if parent is a reference node, parent must be a renamed node. We can // if parent is a reference node, parent must be a renamed node. We can
@ -173,10 +176,11 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
if (parentDir == null) { // root if (parentDir == null) { // root
return true; return true;
} }
if (!parentDir.isInLatestSnapshot(latest)) { if (!parentDir.isInLatestSnapshot(latestSnapshotId)) {
return false; return false;
} }
final INode child = parentDir.getChild(getLocalNameBytes(), latest); final INode child = parentDir.getChild(getLocalNameBytes(),
latestSnapshotId);
if (this == child) { if (this == child) {
return true; return true;
} }
@ -203,21 +207,22 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
* operation, or the snapshot belonging to the DST tree. * operation, or the snapshot belonging to the DST tree.
* *
* @param latestInDst * @param latestInDst
* the latest snapshot in the DST tree above the reference node * id of the latest snapshot in the DST tree above the reference node
* @return True: the modification should be recorded in the snapshot that * @return True: the modification should be recorded in the snapshot that
* belongs to the SRC tree. False: the modification should be * belongs to the SRC tree. False: the modification should be
* recorded in the snapshot that belongs to the DST tree. * recorded in the snapshot that belongs to the DST tree.
*/ */
public final boolean shouldRecordInSrcSnapshot(final Snapshot latestInDst) { public final boolean shouldRecordInSrcSnapshot(final int latestInDst) {
Preconditions.checkState(!isReference()); Preconditions.checkState(!isReference());
if (latestInDst == null) { if (latestInDst == Snapshot.CURRENT_STATE_ID) {
return true; return true;
} }
INodeReference withCount = getParentReference(); INodeReference withCount = getParentReference();
if (withCount != null) { if (withCount != null) {
int dstSnapshotId = withCount.getParentReference().getDstSnapshotId(); int dstSnapshotId = withCount.getParentReference().getDstSnapshotId();
if (dstSnapshotId >= latestInDst.getId()) { if (dstSnapshotId != Snapshot.CURRENT_STATE_ID
&& dstSnapshotId >= latestInDst) {
return true; return true;
} }
} }
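
`shouldRecordInSrcSnapshot` now compares ids directly. Roughly: with no latest snapshot in the DST tree the modification belongs to the SRC tree, and a DST snapshot id on the parent reference that already covers `latestInDst` has the same effect. A sketch that collapses the parent-reference lookup into a parameter (stand-in names, assumed sentinel):

```java
// Sketch of the src-vs-dst snapshot decision after the patch.
class RecordInSrcSketch {
  static final int CURRENT_STATE_ID = Integer.MAX_VALUE; // assumed sentinel

  static boolean shouldRecordInSrcSnapshot(int latestInDst,
      int dstSnapshotIdOfRef) {
    if (latestInDst == CURRENT_STATE_ID) {
      return true; // no snapshot above in the DST tree: record in SRC
    }
    // A concrete dst snapshot id at least as new as latestInDst means the
    // SRC tree's snapshot should hold the modification as well.
    return dstSnapshotIdOfRef != CURRENT_STATE_ID
        && dstSnapshotIdOfRef >= latestInDst;
  }
}
```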
@ -228,13 +233,14 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
* This inode is being modified. The previous version of the inode needs to * This inode is being modified. The previous version of the inode needs to
* be recorded in the latest snapshot. * be recorded in the latest snapshot.
* *
* @param latest the latest snapshot that has been taken. * @param latestSnapshotId The id of the latest snapshot that has been taken.
* Note that it is null if no snapshots have been taken. * Note that it is {@link Snapshot#CURRENT_STATE_ID}
* if no snapshots have been taken.
* @return The current inode, which usually is the same object of this inode. * @return The current inode, which usually is the same object of this inode.
* However, in some cases, this inode may be replaced with a new inode * However, in some cases, this inode may be replaced with a new inode
* for maintaining snapshots. The current inode is then the new inode. * for maintaining snapshots. The current inode is then the new inode.
*/ */
abstract INode recordModification(final Snapshot latest) abstract INode recordModification(final int latestSnapshotId)
throws QuotaExceededException; throws QuotaExceededException;
/** Check whether it's a reference. */ /** Check whether it's a reference. */
@ -330,12 +336,13 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
* snapshot in its diff list. Recursively clean its children. * snapshot in its diff list. Recursively clean its children.
* </pre> * </pre>
* *
* @param snapshot * @param snapshotId
* The snapshot to delete. Null means to delete the current * The id of the snapshot to delete.
* {@link Snapshot#CURRENT_STATE_ID} means to delete the current
* file/directory. * file/directory.
* @param prior * @param priorSnapshotId
* The latest snapshot before the to-be-deleted snapshot. When * The id of the latest snapshot before the to-be-deleted snapshot.
* deleting a current inode, this parameter captures the latest * When deleting a current inode, this parameter captures the latest
* snapshot. * snapshot.
* @param collectedBlocks * @param collectedBlocks
* blocks collected from the descents for further block * blocks collected from the descents for further block
@ -345,8 +352,8 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
* inodeMap * inodeMap
* @return quota usage delta when deleting a snapshot * @return quota usage delta when deleting a snapshot
*/ */
public abstract Quota.Counts cleanSubtree(final Snapshot snapshot, public abstract Quota.Counts cleanSubtree(final int snapshotId,
Snapshot prior, BlocksMapUpdateInfo collectedBlocks, int priorSnapshotId, BlocksMapUpdateInfo collectedBlocks,
List<INode> removedINodes, boolean countDiffChange) List<INode> removedINodes, boolean countDiffChange)
throws QuotaExceededException; throws QuotaExceededException;
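
The `(snapshotId, priorSnapshotId)` pair plus the two sentinels encodes three deletion cases that every `cleanSubtree` implementation below dispatches on. A compact sketch of that dispatch (sentinel values assumed; the three-way split is what the patch standardizes):

```java
// Sketch of the cleanSubtree contract after the patch.
class CleanSubtreeSketch {
  static final int CURRENT_STATE_ID = Integer.MAX_VALUE;
  static final int NO_SNAPSHOT_ID = -1;

  enum Action {
    DESTROY_SUBTREE,              // delete current state, nothing snapshotted
    CLEAN_CURRENT_KEEP_SNAPSHOTS, // delete current state, snapshots remain
    DELETE_ONE_SNAPSHOT           // delete a specific snapshot
  }

  static Action dispatch(int snapshotId, int priorSnapshotId) {
    if (snapshotId == CURRENT_STATE_ID) {
      return priorSnapshotId == NO_SNAPSHOT_ID
          ? Action.DESTROY_SUBTREE
          : Action.CLEAN_CURRENT_KEEP_SNAPSHOTS;
    }
    return Action.DELETE_ONE_SNAPSHOT;
  }
}
```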
@ -460,9 +467,10 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
* @param counts The subtree counts for returning. * @param counts The subtree counts for returning.
* @param useCache Whether to use cached quota usage. Note that * @param useCache Whether to use cached quota usage. Note that
* {@link WithName} node never uses cache for its subtree. * {@link WithName} node never uses cache for its subtree.
* @param lastSnapshotId {@link Snapshot#INVALID_ID} indicates the computation * @param lastSnapshotId {@link Snapshot#CURRENT_STATE_ID} indicates the
* is in the current tree. Otherwise the id indicates * computation is in the current tree. Otherwise the id
* the computation range for a {@link WithName} node. * indicates the computation range for a
* {@link WithName} node.
* @return The same objects as the counts parameter. * @return The same objects as the counts parameter.
*/ */
public abstract Quota.Counts computeQuotaUsage(Quota.Counts counts, public abstract Quota.Counts computeQuotaUsage(Quota.Counts counts,
@ -470,7 +478,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
public final Quota.Counts computeQuotaUsage(Quota.Counts counts, public final Quota.Counts computeQuotaUsage(Quota.Counts counts,
boolean useCache) { boolean useCache) {
return computeQuotaUsage(counts, useCache, Snapshot.INVALID_ID); return computeQuotaUsage(counts, useCache, Snapshot.CURRENT_STATE_ID);
} }
/** /**
@ -558,21 +566,22 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
} }
/** /**
* @param snapshot * @param snapshotId
* if it is not null, get the result from the given snapshot; * if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
* otherwise, get the result from the current inode. * from the given snapshot; otherwise, get the result from the
* current inode.
* @return modification time. * @return modification time.
*/ */
abstract long getModificationTime(Snapshot snapshot); abstract long getModificationTime(int snapshotId);
/** The same as getModificationTime(null). */ /** The same as getModificationTime(Snapshot.CURRENT_STATE_ID). */
@Override @Override
public final long getModificationTime() { public final long getModificationTime() {
return getModificationTime(null); return getModificationTime(Snapshot.CURRENT_STATE_ID);
} }
/** Update modification time if it is larger than the current value. */ /** Update modification time if it is larger than the current value. */
public abstract INode updateModificationTime(long mtime, Snapshot latest) public abstract INode updateModificationTime(long mtime, int latestSnapshotId)
throws QuotaExceededException; throws QuotaExceededException;
/** Set the last modification time of inode. */ /** Set the last modification time of inode. */
@ -580,24 +589,25 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
/** Set the last modification time of inode. */ /** Set the last modification time of inode. */
public final INode setModificationTime(long modificationTime, public final INode setModificationTime(long modificationTime,
Snapshot latest) throws QuotaExceededException { int latestSnapshotId) throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latest); final INode nodeToUpdate = recordModification(latestSnapshotId);
nodeToUpdate.setModificationTime(modificationTime); nodeToUpdate.setModificationTime(modificationTime);
return nodeToUpdate; return nodeToUpdate;
} }
/** /**
* @param snapshot * @param snapshotId
* if it is not null, get the result from the given snapshot; * if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
* otherwise, get the result from the current inode. * from the given snapshot; otherwise, get the result from the
* current inode.
* @return access time * @return access time
*/ */
abstract long getAccessTime(Snapshot snapshot); abstract long getAccessTime(int snapshotId);
/** The same as getAccessTime(null). */ /** The same as getAccessTime(Snapshot.CURRENT_STATE_ID). */
@Override @Override
public final long getAccessTime() { public final long getAccessTime() {
return getAccessTime(null); return getAccessTime(Snapshot.CURRENT_STATE_ID);
} }
/** /**
@ -608,9 +618,9 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
/** /**
* Set last access time of inode. * Set last access time of inode.
*/ */
public final INode setAccessTime(long accessTime, Snapshot latest) public final INode setAccessTime(long accessTime, int latestSnapshotId)
throws QuotaExceededException { throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latest); final INode nodeToUpdate = recordModification(latestSnapshotId);
nodeToUpdate.setAccessTime(accessTime); nodeToUpdate.setAccessTime(accessTime);
return nodeToUpdate; return nodeToUpdate;
} }
@ -679,13 +689,15 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
@VisibleForTesting @VisibleForTesting
public final StringBuffer dumpTreeRecursively() { public final StringBuffer dumpTreeRecursively() {
final StringWriter out = new StringWriter(); final StringWriter out = new StringWriter();
dumpTreeRecursively(new PrintWriter(out, true), new StringBuilder(), null); dumpTreeRecursively(new PrintWriter(out, true), new StringBuilder(),
Snapshot.CURRENT_STATE_ID);
return out.getBuffer(); return out.getBuffer();
} }
@VisibleForTesting @VisibleForTesting
public final void dumpTreeRecursively(PrintStream out) { public final void dumpTreeRecursively(PrintStream out) {
dumpTreeRecursively(new PrintWriter(out, true), new StringBuilder(), null); dumpTreeRecursively(new PrintWriter(out, true), new StringBuilder(),
Snapshot.CURRENT_STATE_ID);
} }
/** /**
@ -694,7 +706,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
*/ */
@VisibleForTesting @VisibleForTesting
public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix, public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
Snapshot snapshot) { int snapshotId) {
out.print(prefix); out.print(prefix);
out.print(" "); out.print(" ");
final String name = getLocalName(); final String name = getLocalName();
@ -703,7 +715,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
out.print(getObjectString()); out.print(getObjectString());
out.print("), "); out.print("), ");
out.print(getParentString()); out.print(getParentString());
out.print(", " + getPermissionStatus(snapshot)); out.print(", " + getPermissionStatus(snapshotId));
} }
/** /**

View File

@ -204,9 +204,9 @@ public class INodeDirectory extends INodeWithAdditionalFields
} }
@Override @Override
public INodeDirectoryAttributes getSnapshotINode(Snapshot snapshot) { public INodeDirectoryAttributes getSnapshotINode(int snapshotId) {
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature(); DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
return sf == null ? this : sf.getDiffs().getSnapshotINode(snapshot, this); return sf == null ? this : sf.getDiffs().getSnapshotINode(snapshotId, this);
} }
@Override @Override
@ -217,12 +217,13 @@ public class INodeDirectory extends INodeWithAdditionalFields
/** Replace itself with an {@link INodeDirectorySnapshottable}. */ /** Replace itself with an {@link INodeDirectorySnapshottable}. */
public INodeDirectorySnapshottable replaceSelf4INodeDirectorySnapshottable( public INodeDirectorySnapshottable replaceSelf4INodeDirectorySnapshottable(
Snapshot latest, final INodeMap inodeMap) throws QuotaExceededException { int latestSnapshotId, final INodeMap inodeMap)
throws QuotaExceededException {
Preconditions.checkState(!(this instanceof INodeDirectorySnapshottable), Preconditions.checkState(!(this instanceof INodeDirectorySnapshottable),
"this is already an INodeDirectorySnapshottable, this=%s", this); "this is already an INodeDirectorySnapshottable, this=%s", this);
final INodeDirectorySnapshottable s = new INodeDirectorySnapshottable(this); final INodeDirectorySnapshottable s = new INodeDirectorySnapshottable(this);
replaceSelf(s, inodeMap).getDirectoryWithSnapshotFeature().getDiffs() replaceSelf(s, inodeMap).getDirectoryWithSnapshotFeature().getDiffs()
.saveSelf2Snapshot(latest, s, this); .saveSelf2Snapshot(latestSnapshotId, s, this);
return s; return s;
} }
@ -289,8 +290,8 @@ public class INodeDirectory extends INodeWithAdditionalFields
} }
INodeReference.WithName replaceChild4ReferenceWithName(INode oldChild, INodeReference.WithName replaceChild4ReferenceWithName(INode oldChild,
Snapshot latest) { int latestSnapshotId) {
Preconditions.checkArgument(latest != null); Preconditions.checkArgument(latestSnapshotId != Snapshot.CURRENT_STATE_ID);
if (oldChild instanceof INodeReference.WithName) { if (oldChild instanceof INodeReference.WithName) {
return (INodeReference.WithName)oldChild; return (INodeReference.WithName)oldChild;
} }
@ -304,22 +305,23 @@ public class INodeDirectory extends INodeWithAdditionalFields
withCount = new INodeReference.WithCount(null, oldChild); withCount = new INodeReference.WithCount(null, oldChild);
} }
final INodeReference.WithName ref = new INodeReference.WithName(this, final INodeReference.WithName ref = new INodeReference.WithName(this,
withCount, oldChild.getLocalNameBytes(), latest.getId()); withCount, oldChild.getLocalNameBytes(), latestSnapshotId);
replaceChild(oldChild, ref, null); replaceChild(oldChild, ref, null);
return ref; return ref;
} }
@Override @Override
public INodeDirectory recordModification(Snapshot latest) public INodeDirectory recordModification(int latestSnapshotId)
throws QuotaExceededException { throws QuotaExceededException {
if (isInLatestSnapshot(latest) && !shouldRecordInSrcSnapshot(latest)) { if (isInLatestSnapshot(latestSnapshotId)
&& !shouldRecordInSrcSnapshot(latestSnapshotId)) {
// add snapshot feature if necessary // add snapshot feature if necessary
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature(); DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
if (sf == null) { if (sf == null) {
sf = addSnapshotFeature(null); sf = addSnapshotFeature(null);
} }
// record self in the diff list if necessary // record self in the diff list if necessary
sf.getDiffs().saveSelf2Snapshot(latest, this, null); sf.getDiffs().saveSelf2Snapshot(latestSnapshotId, this, null);
} }
return this; return this;
} }
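
`recordModification(latestSnapshotId)` is the copy-on-write hook: when the directory is visible in the latest snapshot (and the rename rules do not divert the record to the SRC tree), it lazily attaches the snapshot feature and saves the pre-modification state under that id. A loose sketch of that flow, with `DiffStore` standing in for `DirectoryWithSnapshotFeature` (names and sentinel assumed):

```java
// Sketch of the copy-on-write step in recordModification(..).
class RecordModificationSketch {
  static final int CURRENT_STATE_ID = Integer.MAX_VALUE;

  interface DiffStore {
    void saveSelf2Snapshot(int snapshotId, Object preModificationState);
  }

  private DiffStore diffs;                 // lazily created feature
  private boolean inLatestSnapshot = true; // stand-in for isInLatestSnapshot()

  void recordModification(int latestSnapshotId) {
    if (latestSnapshotId == CURRENT_STATE_ID || !inLatestSnapshot) {
      return; // no snapshot needs the old state preserved
    }
    if (diffs == null) {
      diffs = (snapshotId, state) -> { /* append a diff entry */ };
    }
    // The snapshot keeps seeing the old attributes/children via this diff.
    diffs.saveSelf2Snapshot(latestSnapshotId, this);
  }
}
```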
@ -329,9 +331,9 @@ public class INodeDirectory extends INodeWithAdditionalFields
* *
* @return the child inode, which may be replaced. * @return the child inode, which may be replaced.
*/ */
public INode saveChild2Snapshot(final INode child, final Snapshot latest, public INode saveChild2Snapshot(final INode child, final int latestSnapshotId,
final INode snapshotCopy) throws QuotaExceededException { final INode snapshotCopy) throws QuotaExceededException {
if (latest == null) { if (latestSnapshotId == Snapshot.CURRENT_STATE_ID) {
return child; return child;
} }
@ -340,42 +342,45 @@ public class INodeDirectory extends INodeWithAdditionalFields
if (sf == null) { if (sf == null) {
sf = this.addSnapshotFeature(null); sf = this.addSnapshotFeature(null);
} }
return sf.saveChild2Snapshot(this, child, latest, snapshotCopy); return sf.saveChild2Snapshot(this, child, latestSnapshotId, snapshotCopy);
} }
/** /**
* @param name the name of the child * @param name the name of the child
* @param snapshot * @param snapshotId
* if it is not null, get the result from the given snapshot; * if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
* otherwise, get the result from the current directory. * from the corresponding snapshot; otherwise, get the result from
* the current directory.
* @return the child inode. * @return the child inode.
*/ */
public INode getChild(byte[] name, Snapshot snapshot) { public INode getChild(byte[] name, int snapshotId) {
DirectoryWithSnapshotFeature sf; DirectoryWithSnapshotFeature sf;
if (snapshot == null || (sf = getDirectoryWithSnapshotFeature()) == null) { if (snapshotId == Snapshot.CURRENT_STATE_ID ||
(sf = getDirectoryWithSnapshotFeature()) == null) {
ReadOnlyList<INode> c = getCurrentChildrenList(); ReadOnlyList<INode> c = getCurrentChildrenList();
final int i = ReadOnlyList.Util.binarySearch(c, name); final int i = ReadOnlyList.Util.binarySearch(c, name);
return i < 0 ? null : c.get(i); return i < 0 ? null : c.get(i);
} }
return sf.getChild(this, name, snapshot); return sf.getChild(this, name, snapshotId);
} }
/** /**
* @param snapshot * @param snapshotId
* if it is not null, get the result from the given snapshot; * if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
* otherwise, get the result from the current directory. * from the corresponding snapshot; otherwise, get the result from
* the current directory.
* @return the current children list if the specified snapshot is null; * @return the current children list if the specified snapshot is null;
* otherwise, return the children list corresponding to the snapshot. * otherwise, return the children list corresponding to the snapshot.
* Note that the returned list is never null. * Note that the returned list is never null.
*/ */
public ReadOnlyList<INode> getChildrenList(final Snapshot snapshot) { public ReadOnlyList<INode> getChildrenList(final int snapshotId) {
DirectoryWithSnapshotFeature sf; DirectoryWithSnapshotFeature sf;
if (snapshot == null if (snapshotId == Snapshot.CURRENT_STATE_ID
|| (sf = this.getDirectoryWithSnapshotFeature()) == null) { || (sf = this.getDirectoryWithSnapshotFeature()) == null) {
return getCurrentChildrenList(); return getCurrentChildrenList();
} }
return sf.getChildrenList(this, snapshot); return sf.getChildrenList(this, snapshotId);
} }
private ReadOnlyList<INode> getCurrentChildrenList() { private ReadOnlyList<INode> getCurrentChildrenList() {
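
`getChild(name, snapshotId)` keeps the fast path on the current, sorted children list and only consults the snapshot feature when a real snapshot id is supplied. A self-contained sketch of that lookup shape (simplified types; the real code returns `INode`s reconstructed from diffs):

```java
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Sketch of the two-path child lookup in getChild(..) above.
class ChildLookupSketch {
  static final int CURRENT_STATE_ID = Integer.MAX_VALUE;

  interface SnapshotFeature {
    String getChild(String name, int snapshotId); // view as of a snapshot
  }

  private final List<String> currentChildren = new ArrayList<>(); // sorted
  private SnapshotFeature sf; // null until the first snapshot diff exists

  String getChild(String name, int snapshotId) {
    if (snapshotId == CURRENT_STATE_ID || sf == null) {
      int i = Collections.binarySearch(currentChildren, name);
      return i < 0 ? null : currentChildren.get(i);
    }
    return sf.getChild(name, snapshotId); // reconstructed from diffs
  }
}
```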
@ -450,15 +455,15 @@ public class INodeDirectory extends INodeWithAdditionalFields
/** /**
* Remove the specified child from this directory. * Remove the specified child from this directory.
*/ */
public boolean removeChild(INode child, Snapshot latest) public boolean removeChild(INode child, int latestSnapshotId)
throws QuotaExceededException { throws QuotaExceededException {
if (isInLatestSnapshot(latest)) { if (isInLatestSnapshot(latestSnapshotId)) {
// create snapshot feature if necessary // create snapshot feature if necessary
DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature(); DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
if (sf == null) { if (sf == null) {
sf = this.addSnapshotFeature(null); sf = this.addSnapshotFeature(null);
} }
return sf.removeChild(this, child, latest); return sf.removeChild(this, child, latestSnapshotId);
} }
return removeChild(child); return removeChild(child);
} }
@ -493,24 +498,24 @@ public class INodeDirectory extends INodeWithAdditionalFields
* otherwise, return true; * otherwise, return true;
*/ */
public boolean addChild(INode node, final boolean setModTime, public boolean addChild(INode node, final boolean setModTime,
final Snapshot latest) throws QuotaExceededException { final int latestSnapshotId) throws QuotaExceededException {
final int low = searchChildren(node.getLocalNameBytes()); final int low = searchChildren(node.getLocalNameBytes());
if (low >= 0) { if (low >= 0) {
return false; return false;
} }
if (isInLatestSnapshot(latest)) { if (isInLatestSnapshot(latestSnapshotId)) {
// create snapshot feature if necessary // create snapshot feature if necessary
DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature(); DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
if (sf == null) { if (sf == null) {
sf = this.addSnapshotFeature(null); sf = this.addSnapshotFeature(null);
} }
return sf.addChild(this, node, setModTime, latest); return sf.addChild(this, node, setModTime, latestSnapshotId);
} }
addChild(node, low); addChild(node, low);
if (setModTime) { if (setModTime) {
// update modification time of the parent directory // update modification time of the parent directory
updateModificationTime(node.getModificationTime(), latest); updateModificationTime(node.getModificationTime(), latestSnapshotId);
} }
return true; return true;
} }
@ -548,10 +553,9 @@ public class INodeDirectory extends INodeWithAdditionalFields
// we are computing the quota usage for a specific snapshot here, i.e., the // we are computing the quota usage for a specific snapshot here, i.e., the
// computation only includes files/directories that exist at the time of the // computation only includes files/directories that exist at the time of the
// given snapshot // given snapshot
if (sf != null && lastSnapshotId != Snapshot.INVALID_ID if (sf != null && lastSnapshotId != Snapshot.CURRENT_STATE_ID
&& !(useCache && isQuotaSet())) { && !(useCache && isQuotaSet())) {
Snapshot lastSnapshot = sf.getDiffs().getSnapshotById(lastSnapshotId); ReadOnlyList<INode> childrenList = getChildrenList(lastSnapshotId);
ReadOnlyList<INode> childrenList = getChildrenList(lastSnapshot);
for (INode child : childrenList) { for (INode child : childrenList) {
child.computeQuotaUsage(counts, useCache, lastSnapshotId); child.computeQuotaUsage(counts, useCache, lastSnapshotId);
} }
@ -607,7 +611,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
ContentSummaryComputationContext computeDirectoryContentSummary( ContentSummaryComputationContext computeDirectoryContentSummary(
ContentSummaryComputationContext summary) { ContentSummaryComputationContext summary) {
ReadOnlyList<INode> childrenList = getChildrenList(null); ReadOnlyList<INode> childrenList = getChildrenList(Snapshot.CURRENT_STATE_ID);
// Explicit traversing is done to enable repositioning after relinquishing // Explicit traversing is done to enable repositioning after relinquishing
// and reacquiring locks. // and reacquiring locks.
for (int i = 0; i < childrenList.size(); i++) { for (int i = 0; i < childrenList.size(); i++) {
@ -629,7 +633,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
break; break;
} }
// Obtain the children list again since it may have been modified. // Obtain the children list again since it may have been modified.
childrenList = getChildrenList(null); childrenList = getChildrenList(Snapshot.CURRENT_STATE_ID);
// Reposition in case the children list is changed. Decrement by 1 // Reposition in case the children list is changed. Decrement by 1
// since it will be incremented when loops. // since it will be incremented when loops.
i = nextChild(childrenList, childName) - 1; i = nextChild(childrenList, childName) - 1;
@ -668,21 +672,16 @@ public class INodeDirectory extends INodeWithAdditionalFields
* The reference node to be removed/replaced * The reference node to be removed/replaced
* @param newChild * @param newChild
* The node to be added back * The node to be added back
* @param latestSnapshot
* The latest snapshot. Note this may not be the last snapshot in the
* diff list, since the src tree of the current rename operation
* may be the dst tree of a previous rename.
* @throws QuotaExceededException should not throw this exception * @throws QuotaExceededException should not throw this exception
*/ */
public void undoRename4ScrParent(final INodeReference oldChild, public void undoRename4ScrParent(final INodeReference oldChild,
final INode newChild, Snapshot latestSnapshot) final INode newChild) throws QuotaExceededException {
throws QuotaExceededException {
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature(); DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
Preconditions.checkState(sf != null, Preconditions.checkState(sf != null,
"Directory does not have snapshot feature"); "Directory does not have snapshot feature");
sf.getDiffs().removeChild(ListType.DELETED, oldChild); sf.getDiffs().removeChild(ListType.DELETED, oldChild);
sf.getDiffs().replaceChild(ListType.CREATED, oldChild, newChild); sf.getDiffs().replaceChild(ListType.CREATED, oldChild, newChild);
addChild(newChild, true, null); addChild(newChild, true, Snapshot.CURRENT_STATE_ID);
} }
/** /**
@ -691,16 +690,14 @@ public class INodeDirectory extends INodeWithAdditionalFields
* and delete possible record in the deleted list. * and delete possible record in the deleted list.
*/ */
public void undoRename4DstParent(final INode deletedChild, public void undoRename4DstParent(final INode deletedChild,
Snapshot latestSnapshot) throws QuotaExceededException { int latestSnapshotId) throws QuotaExceededException {
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature(); DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
Preconditions.checkState(sf != null, Preconditions.checkState(sf != null,
"Directory does not have snapshot feature"); "Directory does not have snapshot feature");
boolean removeDeletedChild = sf.getDiffs().removeChild(ListType.DELETED, boolean removeDeletedChild = sf.getDiffs().removeChild(ListType.DELETED,
deletedChild); deletedChild);
// pass null for inodeMap since the parent node will not get replaced when int sid = removeDeletedChild ? Snapshot.CURRENT_STATE_ID : latestSnapshotId;
// undoing rename final boolean added = addChild(deletedChild, true, sid);
final boolean added = addChild(deletedChild, true, removeDeletedChild ? null
: latestSnapshot);
// update quota usage if adding is successfully and the old child has not // update quota usage if adding is successfully and the old child has not
// been stored in deleted list before // been stored in deleted list before
if (added && !removeDeletedChild) { if (added && !removeDeletedChild) {
@ -722,8 +719,8 @@ public class INodeDirectory extends INodeWithAdditionalFields
} }
/** Call cleanSubtree(..) recursively down the subtree. */ /** Call cleanSubtree(..) recursively down the subtree. */
public Quota.Counts cleanSubtreeRecursively(final Snapshot snapshot, public Quota.Counts cleanSubtreeRecursively(final int snapshot,
Snapshot prior, final BlocksMapUpdateInfo collectedBlocks, int prior, final BlocksMapUpdateInfo collectedBlocks,
final List<INode> removedINodes, final Map<INode, INode> excludedNodes, final List<INode> removedINodes, final Map<INode, INode> excludedNodes,
final boolean countDiffChange) throws QuotaExceededException { final boolean countDiffChange) throws QuotaExceededException {
Quota.Counts counts = Quota.Counts.newInstance(); Quota.Counts counts = Quota.Counts.newInstance();
@ -732,9 +729,10 @@ public class INodeDirectory extends INodeWithAdditionalFields
// to its latest previous snapshot. (besides, we also need to consider nodes // to its latest previous snapshot. (besides, we also need to consider nodes
// created after prior but before snapshot. this will be done in // created after prior but before snapshot. this will be done in
// DirectoryWithSnapshotFeature) // DirectoryWithSnapshotFeature)
Snapshot s = snapshot != null && prior != null ? prior : snapshot; int s = snapshot != Snapshot.CURRENT_STATE_ID
&& prior != Snapshot.NO_SNAPSHOT_ID ? prior : snapshot;
for (INode child : getChildrenList(s)) { for (INode child : getChildrenList(s)) {
if (snapshot != null && excludedNodes != null if (snapshot != Snapshot.CURRENT_STATE_ID && excludedNodes != null
&& excludedNodes.containsKey(child)) { && excludedNodes.containsKey(child)) {
continue; continue;
} else { } else {
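
The `int s = ...` selection above decides which children list the recursion walks: deleting snapshot `s` with a prior snapshot `p` must clean the children as they existed at `p`; with no prior, the list at `s` itself (possibly the current state) is used. Isolated as a sketch (assumed sentinels):

```java
// Sketch of the prior-vs-snapshot children-list selection.
class PriorSelectionSketch {
  static final int CURRENT_STATE_ID = Integer.MAX_VALUE;
  static final int NO_SNAPSHOT_ID = -1;

  static int childrenListId(int snapshotId, int priorSnapshotId) {
    return (snapshotId != CURRENT_STATE_ID
        && priorSnapshotId != NO_SNAPSHOT_ID)
        ? priorSnapshotId  // clean relative to the latest prior snapshot
        : snapshotId;      // no prior: walk the list at the snapshot itself
  }
}
```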
@ -753,7 +751,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
if (sf != null) { if (sf != null) {
sf.clear(this, collectedBlocks, removedINodes); sf.clear(this, collectedBlocks, removedINodes);
} }
for (INode child : getChildrenList(null)) { for (INode child : getChildrenList(Snapshot.CURRENT_STATE_ID)) {
child.destroyAndCollectBlocks(collectedBlocks, removedINodes); child.destroyAndCollectBlocks(collectedBlocks, removedINodes);
} }
clear(); clear();
@ -761,18 +759,19 @@ public class INodeDirectory extends INodeWithAdditionalFields
} }
@Override @Override
public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior, public Quota.Counts cleanSubtree(final int snapshotId, int priorSnapshotId,
final BlocksMapUpdateInfo collectedBlocks, final BlocksMapUpdateInfo collectedBlocks,
final List<INode> removedINodes, final boolean countDiffChange) final List<INode> removedINodes, final boolean countDiffChange)
throws QuotaExceededException { throws QuotaExceededException {
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature(); DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
// there is snapshot data // there is snapshot data
if (sf != null) { if (sf != null) {
return sf.cleanDirectory(this, snapshot, prior, collectedBlocks, return sf.cleanDirectory(this, snapshotId, priorSnapshotId,
removedINodes, countDiffChange); collectedBlocks, removedINodes, countDiffChange);
} }
// there is no snapshot data // there is no snapshot data
if (prior == null && snapshot == null) { if (priorSnapshotId == Snapshot.NO_SNAPSHOT_ID
&& snapshotId == Snapshot.CURRENT_STATE_ID) {
// destroy the whole subtree and collect blocks that should be deleted // destroy the whole subtree and collect blocks that should be deleted
Quota.Counts counts = Quota.Counts.newInstance(); Quota.Counts counts = Quota.Counts.newInstance();
this.computeQuotaUsage(counts, true); this.computeQuotaUsage(counts, true);
@ -780,7 +779,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
return counts; return counts;
} else { } else {
// process recursively down the subtree // process recursively down the subtree
Quota.Counts counts = cleanSubtreeRecursively(snapshot, prior, Quota.Counts counts = cleanSubtreeRecursively(snapshotId, priorSnapshotId,
collectedBlocks, removedINodes, null, countDiffChange); collectedBlocks, removedINodes, null, countDiffChange);
if (isQuotaSet()) { if (isQuotaSet()) {
getDirectoryWithQuotaFeature().addSpaceConsumed2Cache( getDirectoryWithQuotaFeature().addSpaceConsumed2Cache(
@ -816,7 +815,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
@VisibleForTesting @VisibleForTesting
@Override @Override
public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix, public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
final Snapshot snapshot) { final int snapshot) {
super.dumpTreeRecursively(out, prefix, snapshot); super.dumpTreeRecursively(out, prefix, snapshot);
out.print(", childrenSize=" + getChildrenList(snapshot).size()); out.print(", childrenSize=" + getChildrenList(snapshot).size());
final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature(); final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
@ -824,7 +823,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
out.print(", " + q); out.print(", " + q);
} }
if (this instanceof Snapshot.Root) { if (this instanceof Snapshot.Root) {
out.print(", snapshotId=" + snapshot.getId()); out.print(", snapshotId=" + snapshot);
} }
out.println(); out.println();
@ -869,7 +868,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
for(final Iterator<SnapshotAndINode> i = subs.iterator(); i.hasNext();) { for(final Iterator<SnapshotAndINode> i = subs.iterator(); i.hasNext();) {
final SnapshotAndINode pair = i.next(); final SnapshotAndINode pair = i.next();
prefix.append(i.hasNext()? DUMPTREE_EXCEPT_LAST_ITEM: DUMPTREE_LAST_ITEM); prefix.append(i.hasNext()? DUMPTREE_EXCEPT_LAST_ITEM: DUMPTREE_LAST_ITEM);
pair.inode.dumpTreeRecursively(out, prefix, pair.snapshot); pair.inode.dumpTreeRecursively(out, prefix, pair.snapshotId);
prefix.setLength(prefix.length() - 2); prefix.setLength(prefix.length() - 2);
} }
} }
@ -877,20 +876,16 @@ public class INodeDirectory extends INodeWithAdditionalFields
/** A pair of Snapshot and INode objects. */ /** A pair of Snapshot and INode objects. */
protected static class SnapshotAndINode { protected static class SnapshotAndINode {
public final Snapshot snapshot; public final int snapshotId;
public final INode inode; public final INode inode;
public SnapshotAndINode(Snapshot snapshot, INode inode) { public SnapshotAndINode(int snapshot, INode inode) {
this.snapshot = snapshot; this.snapshotId = snapshot;
this.inode = inode; this.inode = inode;
} }
public SnapshotAndINode(Snapshot snapshot) {
this(snapshot, snapshot.getRoot());
}
} }
public final int getChildrenNum(final Snapshot snapshot) { public final int getChildrenNum(final int snapshotId) {
return getChildrenList(snapshot).size(); return getChildrenList(snapshotId).size();
} }
} }

View File

@ -17,6 +17,9 @@
*/ */
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID;
import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.NO_SNAPSHOT_ID;
import java.io.FileNotFoundException; import java.io.FileNotFoundException;
import java.io.IOException; import java.io.IOException;
import java.io.PrintWriter; import java.io.PrintWriter;
@ -282,26 +285,27 @@ public class INodeFile extends INodeWithAdditionalFields
} }
@Override @Override
public INodeFileAttributes getSnapshotINode(final Snapshot snapshot) { public INodeFileAttributes getSnapshotINode(final int snapshotId) {
FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature(); FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
if (sf != null) { if (sf != null) {
return sf.getDiffs().getSnapshotINode(snapshot, this); return sf.getDiffs().getSnapshotINode(snapshotId, this);
} else { } else {
return this; return this;
} }
} }
@Override @Override
public INodeFile recordModification(final Snapshot latest) public INodeFile recordModification(final int latestSnapshotId)
throws QuotaExceededException { throws QuotaExceededException {
if (isInLatestSnapshot(latest) && !shouldRecordInSrcSnapshot(latest)) { if (isInLatestSnapshot(latestSnapshotId)
&& !shouldRecordInSrcSnapshot(latestSnapshotId)) {
// the file is in snapshot, create a snapshot feature if it does not have // the file is in snapshot, create a snapshot feature if it does not have
FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature(); FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
if (sf == null) { if (sf == null) {
sf = addSnapshotFeature(null); sf = addSnapshotFeature(null);
} }
// record self in the diff list if necessary // record self in the diff list if necessary
sf.getDiffs().saveSelf2Snapshot(latest, this, null); sf.getDiffs().saveSelf2Snapshot(latestSnapshotId, this, null);
} }
return this; return this;
} }
@ -317,23 +321,22 @@ public class INodeFile extends INodeWithAdditionalFields
/* End of Snapshot Feature */ /* End of Snapshot Feature */
/** @return the replication factor of the file. */ /** @return the replication factor of the file. */
public final short getFileReplication(Snapshot snapshot) { public final short getFileReplication(int snapshot) {
if (snapshot != null) { if (snapshot != CURRENT_STATE_ID) {
return getSnapshotINode(snapshot).getFileReplication(); return getSnapshotINode(snapshot).getFileReplication();
} }
return HeaderFormat.getReplication(header); return HeaderFormat.getReplication(header);
} }
/** The same as getFileReplication(null). */ /** The same as getFileReplication(null). */
@Override // INodeFileAttributes @Override // INodeFileAttributes
public final short getFileReplication() { public final short getFileReplication() {
return getFileReplication(null); return getFileReplication(CURRENT_STATE_ID);
} }
@Override // BlockCollection @Override // BlockCollection
public short getBlockReplication() { public short getBlockReplication() {
short max = getFileReplication(null); short max = getFileReplication(CURRENT_STATE_ID);
FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature(); FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
if (sf != null) { if (sf != null) {
short maxInSnapshot = sf.getMaxBlockRepInDiffs(); short maxInSnapshot = sf.getMaxBlockRepInDiffs();
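
`getBlockReplication()` takes the maximum of the current replication and the highest value recorded in any snapshot diff, so blocks still referenced by a snapshot are not under-replicated after the live setting is lowered. The arithmetic, isolated as a sketch:

```java
// Sketch: effective replication = max(current, max over snapshot diffs).
class BlockReplicationSketch {
  static short blockReplication(short current, short[] replicationInDiffs) {
    short max = current;
    for (short r : replicationInDiffs) {
      if (r > max) {
        max = r;
      }
    }
    return max;
  }
}
```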
@ -351,9 +354,10 @@ public class INodeFile extends INodeWithAdditionalFields
} }
/** Set the replication factor of this file. */ /** Set the replication factor of this file. */
public final INodeFile setFileReplication(short replication, Snapshot latest, public final INodeFile setFileReplication(short replication,
final INodeMap inodeMap) throws QuotaExceededException { int latestSnapshotId, final INodeMap inodeMap)
final INodeFile nodeToUpdate = recordModification(latest); throws QuotaExceededException {
final INodeFile nodeToUpdate = recordModification(latestSnapshotId);
nodeToUpdate.setFileReplication(replication); nodeToUpdate.setFileReplication(replication);
return nodeToUpdate; return nodeToUpdate;
} }
@ -431,22 +435,22 @@ public class INodeFile extends INodeWithAdditionalFields
} }
@Override @Override
public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior, public Quota.Counts cleanSubtree(final int snapshot, int priorSnapshotId,
final BlocksMapUpdateInfo collectedBlocks, final BlocksMapUpdateInfo collectedBlocks,
final List<INode> removedINodes, final boolean countDiffChange) final List<INode> removedINodes, final boolean countDiffChange)
throws QuotaExceededException { throws QuotaExceededException {
FileWithSnapshotFeature sf = getFileWithSnapshotFeature(); FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
if (sf != null) { if (sf != null) {
return sf.cleanFile(this, snapshot, prior, collectedBlocks, return sf.cleanFile(this, snapshot, priorSnapshotId, collectedBlocks,
removedINodes, countDiffChange); removedINodes, countDiffChange);
} }
Quota.Counts counts = Quota.Counts.newInstance(); Quota.Counts counts = Quota.Counts.newInstance();
if (snapshot == null && prior == null) { if (snapshot == CURRENT_STATE_ID && priorSnapshotId == NO_SNAPSHOT_ID) {
// this only happens when deleting the current file and the file is not // this only happens when deleting the current file and the file is not
// in any snapshot // in any snapshot
computeQuotaUsage(counts, false); computeQuotaUsage(counts, false);
destroyAndCollectBlocks(collectedBlocks, removedINodes); destroyAndCollectBlocks(collectedBlocks, removedINodes);
} else if (snapshot == null && prior != null) { } else if (snapshot == CURRENT_STATE_ID && priorSnapshotId != NO_SNAPSHOT_ID) {
// when deleting the current file and the file is in snapshot, we should // when deleting the current file and the file is in snapshot, we should
// clean the 0-sized block if the file is UC // clean the 0-sized block if the file is UC
FileUnderConstructionFeature uc = getFileUnderConstructionFeature(); FileUnderConstructionFeature uc = getFileUnderConstructionFeature();
@ -490,17 +494,18 @@ public class INodeFile extends INodeWithAdditionalFields
FileWithSnapshotFeature sf = getFileWithSnapshotFeature(); FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
if (sf != null) { if (sf != null) {
FileDiffList fileDiffList = sf.getDiffs(); FileDiffList fileDiffList = sf.getDiffs();
Snapshot last = fileDiffList.getLastSnapshot(); int last = fileDiffList.getLastSnapshotId();
List<FileDiff> diffs = fileDiffList.asList(); List<FileDiff> diffs = fileDiffList.asList();
if (lastSnapshotId == Snapshot.INVALID_ID || last == null) { if (lastSnapshotId == Snapshot.CURRENT_STATE_ID
|| last == Snapshot.CURRENT_STATE_ID) {
nsDelta += diffs.size(); nsDelta += diffs.size();
dsDelta = diskspaceConsumed(); dsDelta = diskspaceConsumed();
} else if (last.getId() < lastSnapshotId) { } else if (last < lastSnapshotId) {
dsDelta = computeFileSize(true, false) * getFileReplication(); dsDelta = computeFileSize(true, false) * getFileReplication();
} else { } else {
Snapshot s = fileDiffList.getSnapshotById(lastSnapshotId); int sid = fileDiffList.getSnapshotById(lastSnapshotId);
dsDelta = diskspaceConsumed(s); dsDelta = diskspaceConsumed(sid);
} }
} else { } else {
dsDelta = diskspaceConsumed(); dsDelta = diskspaceConsumed();
@ -511,7 +516,7 @@ public class INodeFile extends INodeWithAdditionalFields
} }
@Override @Override
public final ContentSummaryComputationContext computeContentSummary( public final ContentSummaryComputationContext computeContentSummary(
final ContentSummaryComputationContext summary) { final ContentSummaryComputationContext summary) {
computeContentSummary4Snapshot(summary.getCounts()); computeContentSummary4Snapshot(summary.getCounts());
computeContentSummary4Current(summary.getCounts()); computeContentSummary4Current(summary.getCounts());
@ -550,23 +555,21 @@ public class INodeFile extends INodeWithAdditionalFields
/** The same as computeFileSize(null). */ /** The same as computeFileSize(null). */
public final long computeFileSize() { public final long computeFileSize() {
return computeFileSize(null); return computeFileSize(CURRENT_STATE_ID);
} }
/** /**
* Compute file size of the current file if the given snapshot is null; * Compute file size of the current file if the given snapshot is null;
* otherwise, get the file size from the given snapshot. * otherwise, get the file size from the given snapshot.
*/ */
public final long computeFileSize(Snapshot snapshot) { public final long computeFileSize(int snapshotId) {
FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature(); FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
if (snapshot != null && sf != null) { if (snapshotId != CURRENT_STATE_ID && sf != null) {
final FileDiff d = sf.getDiffs().getDiff( final FileDiff d = sf.getDiffs().getDiffById(snapshotId);
snapshot);
if (d != null) { if (d != null) {
return d.getFileSize(); return d.getFileSize();
} }
} }
return computeFileSize(true, false); return computeFileSize(true, false);
} }
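
`computeFileSize(snapshotId)` returns the size frozen in the matching `FileDiff` when a real snapshot id is given, and otherwise falls back to the live size. A sketch with a plain `Map` standing in for the `FileDiffList` lookup by id (assumed sentinel):

```java
import java.util.Map;

// Sketch of the snapshot-size lookup in computeFileSize(..).
class FileSizeSketch {
  static final int CURRENT_STATE_ID = Integer.MAX_VALUE;

  static long computeFileSize(int snapshotId,
      Map<Integer, Long> sizeBySnapshotId, long currentSize) {
    if (snapshotId != CURRENT_STATE_ID) {
      Long frozen = sizeBySnapshotId.get(snapshotId);
      if (frozen != null) {
        return frozen; // size at the time the snapshot was taken
      }
    }
    return currentSize; // live size of the current file
  }
}
```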
@ -617,9 +620,10 @@ public class INodeFile extends INodeWithAdditionalFields
return computeFileSize(true, true) * getBlockReplication(); return computeFileSize(true, true) * getBlockReplication();
} }
public final long diskspaceConsumed(Snapshot lastSnapshot) { public final long diskspaceConsumed(int lastSnapshotId) {
if (lastSnapshot != null) { if (lastSnapshotId != CURRENT_STATE_ID) {
return computeFileSize(lastSnapshot) * getFileReplication(lastSnapshot); return computeFileSize(lastSnapshotId)
* getFileReplication(lastSnapshotId);
} else { } else {
return diskspaceConsumed(); return diskspaceConsumed();
} }
@ -648,9 +652,9 @@ public class INodeFile extends INodeWithAdditionalFields
@VisibleForTesting @VisibleForTesting
@Override @Override
public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix, public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
final Snapshot snapshot) { final int snapshotId) {
super.dumpTreeRecursively(out, prefix, snapshot); super.dumpTreeRecursively(out, prefix, snapshotId);
out.print(", fileSize=" + computeFileSize(snapshot)); out.print(", fileSize=" + computeFileSize(snapshotId));
// only compare the first block // only compare the first block
out.print(", blocks="); out.print(", blocks=");
out.print(blocks == null || blocks.length == 0? null: blocks[0]); out.print(blocks == null || blocks.length == 0? null: blocks[0]);

View File

@ -23,7 +23,6 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.Quota.Counts; import org.apache.hadoop.hdfs.server.namenode.Quota.Counts;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.util.GSet; import org.apache.hadoop.util.GSet;
import org.apache.hadoop.util.LightWeightGSet; import org.apache.hadoop.util.LightWeightGSet;
@ -89,7 +88,8 @@ public class INodeMap {
"", "", new FsPermission((short) 0)), 0, 0) { "", "", new FsPermission((short) 0)), 0, 0) {
@Override @Override
INode recordModification(Snapshot latest) throws QuotaExceededException { INode recordModification(int latestSnapshotId)
throws QuotaExceededException {
return null; return null;
} }
@ -112,7 +112,7 @@ public class INodeMap {
} }
@Override @Override
public Counts cleanSubtree(Snapshot snapshot, Snapshot prior, public Counts cleanSubtree(int snapshotId, int priorSnapshotId,
BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes, BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes,
boolean countDiffChange) throws QuotaExceededException { boolean countDiffChange) throws QuotaExceededException {
return null; return null;

View File

@@ -91,7 +91,7 @@ public abstract class INodeReference extends INode {
    * method to identify the snapshot which is the latest snapshot before the
    * reference node's creation.
    */
-  static Snapshot getPriorSnapshot(INodeReference ref) {
+  static int getPriorSnapshot(INodeReference ref) {
     WithCount wc = (WithCount) ref.getReferredINode();
     WithName wn = null;
     if (ref instanceof DstReference) {

@@ -111,7 +111,7 @@ public abstract class INodeReference extends INode {
         }
       }
     }
-    return null;
+    return Snapshot.NO_SNAPSHOT_ID;
   }

   private INode referred;

@@ -185,13 +185,13 @@ public abstract class INodeReference extends INode {
   }

   @Override
-  public final PermissionStatus getPermissionStatus(Snapshot snapshot) {
-    return referred.getPermissionStatus(snapshot);
+  public final PermissionStatus getPermissionStatus(int snapshotId) {
+    return referred.getPermissionStatus(snapshotId);
   }

   @Override
-  public final String getUserName(Snapshot snapshot) {
-    return referred.getUserName(snapshot);
+  public final String getUserName(int snapshotId) {
+    return referred.getUserName(snapshotId);
   }

   @Override

@@ -200,8 +200,8 @@ public abstract class INodeReference extends INode {
   }

   @Override
-  public final String getGroupName(Snapshot snapshot) {
-    return referred.getGroupName(snapshot);
+  public final String getGroupName(int snapshotId) {
+    return referred.getGroupName(snapshotId);
   }

   @Override

@@ -210,8 +210,8 @@ public abstract class INodeReference extends INode {
   }

   @Override
-  public final FsPermission getFsPermission(Snapshot snapshot) {
-    return referred.getFsPermission(snapshot);
+  public final FsPermission getFsPermission(int snapshotId) {
+    return referred.getFsPermission(snapshotId);
   }

   @Override
   public final short getFsPermissionShort() {

@@ -229,14 +229,14 @@ public abstract class INodeReference extends INode {
   }

   @Override
-  public final long getModificationTime(Snapshot snapshot) {
-    return referred.getModificationTime(snapshot);
+  public final long getModificationTime(int snapshotId) {
+    return referred.getModificationTime(snapshotId);
   }

   @Override
-  public final INode updateModificationTime(long mtime, Snapshot latest)
+  public final INode updateModificationTime(long mtime, int latestSnapshotId)
       throws QuotaExceededException {
-    return referred.updateModificationTime(mtime, latest);
+    return referred.updateModificationTime(mtime, latestSnapshotId);
   }

   @Override

@@ -245,8 +245,8 @@ public abstract class INodeReference extends INode {
   }

   @Override
-  public final long getAccessTime(Snapshot snapshot) {
-    return referred.getAccessTime(snapshot);
+  public final long getAccessTime(int snapshotId) {
+    return referred.getAccessTime(snapshotId);
   }

   @Override

@@ -255,15 +255,15 @@ public abstract class INodeReference extends INode {
   }

   @Override
-  final INode recordModification(Snapshot latest)
+  final INode recordModification(int latestSnapshotId)
       throws QuotaExceededException {
-    referred.recordModification(latest);
+    referred.recordModification(latestSnapshotId);
     // reference is never replaced
     return this;
   }

   @Override // used by WithCount
-  public Quota.Counts cleanSubtree(Snapshot snapshot, Snapshot prior,
+  public Quota.Counts cleanSubtree(int snapshot, int prior,
       BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes,
       final boolean countDiffChange) throws QuotaExceededException {
     return referred.cleanSubtree(snapshot, prior, collectedBlocks,

@@ -291,8 +291,8 @@ public abstract class INodeReference extends INode {
   }

   @Override
-  public final INodeAttributes getSnapshotINode(Snapshot snapshot) {
-    return referred.getSnapshotINode(snapshot);
+  public final INodeAttributes getSnapshotINode(int snapshotId) {
+    return referred.getSnapshotINode(snapshotId);
   }

   @Override

@@ -308,7 +308,7 @@ public abstract class INodeReference extends INode {
   @Override
   public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
-      final Snapshot snapshot) {
+      final int snapshot) {
     super.dumpTreeRecursively(out, prefix, snapshot);
     if (this instanceof DstReference) {
       out.print(", dstSnapshotId=" + ((DstReference) this).dstSnapshotId);

@@ -327,7 +327,7 @@ public abstract class INodeReference extends INode {
   }

   public int getDstSnapshotId() {
-    return Snapshot.INVALID_ID;
+    return Snapshot.CURRENT_STATE_ID;
   }

   /** An anonymous reference with reference count. */

@@ -457,34 +457,35 @@ public abstract class INodeReference extends INode {
       // node happened before the rename of its ancestor. This should be
      // impossible since for WithName node we only count its children at the
      // time of the rename.
-      Preconditions.checkState(this.lastSnapshotId >= lastSnapshotId);
+      Preconditions.checkState(lastSnapshotId == Snapshot.CURRENT_STATE_ID
+          || this.lastSnapshotId >= lastSnapshotId);
       final INode referred = this.getReferredINode().asReference()
           .getReferredINode();
       // We will continue the quota usage computation using the same snapshot id
       // as time line (if the given snapshot id is valid). Also, we cannot use
       // cache for the referred node since its cached quota may have already
       // been updated by changes in the current tree.
-      int id = lastSnapshotId > Snapshot.INVALID_ID ?
+      int id = lastSnapshotId != Snapshot.CURRENT_STATE_ID ?
           lastSnapshotId : this.lastSnapshotId;
       return referred.computeQuotaUsage(counts, false, id);
     }

     @Override
-    public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
+    public Quota.Counts cleanSubtree(final int snapshot, int prior,
         final BlocksMapUpdateInfo collectedBlocks,
         final List<INode> removedINodes, final boolean countDiffChange)
         throws QuotaExceededException {
       // since WithName node resides in deleted list acting as a snapshot copy,
       // the parameter snapshot must be non-null
-      Preconditions.checkArgument(snapshot != null);
-      // if prior is null, we need to check snapshot belonging to the previous
-      // WithName instance
-      if (prior == null) {
+      Preconditions.checkArgument(snapshot != Snapshot.CURRENT_STATE_ID);
+      // if prior is NO_SNAPSHOT_ID, we need to check snapshot belonging to the
+      // previous WithName instance
+      if (prior == Snapshot.NO_SNAPSHOT_ID) {
         prior = getPriorSnapshot(this);
       }
-      if (prior != null
-          && Snapshot.ID_COMPARATOR.compare(snapshot, prior) <= 0) {
+      if (prior != Snapshot.NO_SNAPSHOT_ID
+          && Snapshot.ID_INTEGER_COMPARATOR.compare(snapshot, prior) <= 0) {
         return Quota.Counts.newInstance();
       }

@@ -496,7 +497,7 @@ public abstract class INodeReference extends INode {
             -counts.get(Quota.DISKSPACE), true);
       }

-      if (snapshot.getId() < lastSnapshotId) {
+      if (snapshot < lastSnapshotId) {
         // for a WithName node, when we compute its quota usage, we only count
         // in all the nodes existing at the time of the corresponding rename op.
         // Thus if we are deleting a snapshot before/at the snapshot associated

@@ -509,16 +510,16 @@ public abstract class INodeReference extends INode {
     @Override
     public void destroyAndCollectBlocks(BlocksMapUpdateInfo collectedBlocks,
         final List<INode> removedINodes) {
-      Snapshot snapshot = getSelfSnapshot();
+      int snapshot = getSelfSnapshot();
       if (removeReference(this) <= 0) {
         getReferredINode().destroyAndCollectBlocks(collectedBlocks,
             removedINodes);
       } else {
-        Snapshot prior = getPriorSnapshot(this);
+        int prior = getPriorSnapshot(this);
         INode referred = getReferredINode().asReference().getReferredINode();
-        if (snapshot != null) {
-          if (prior != null && snapshot.getId() <= prior.getId()) {
+        if (snapshot != Snapshot.NO_SNAPSHOT_ID) {
+          if (prior != Snapshot.NO_SNAPSHOT_ID && snapshot <= prior) {
             // the snapshot to be deleted has been deleted while traversing
             // the src tree of the previous rename operation. This usually
             // happens when rename's src and dst are under the same

@@ -545,9 +546,9 @@ public abstract class INodeReference extends INode {
       }
     }

-    private Snapshot getSelfSnapshot() {
+    private int getSelfSnapshot() {
       INode referred = getReferredINode().asReference().getReferredINode();
-      Snapshot snapshot = null;
+      int snapshot = Snapshot.NO_SNAPSHOT_ID;
       if (referred.isFile() && referred.asFile().isWithSnapshot()) {
         snapshot = referred.asFile().getDiffs().getPrior(lastSnapshotId);
       } else if (referred.isDirectory()) {

@@ -569,7 +570,7 @@ public abstract class INodeReference extends INode {
      * latest snapshot. Otherwise changes will be recorded to the snapshot
      * belonging to the src of the rename.
      *
-     * {@link Snapshot#INVALID_ID} means no dstSnapshot (e.g., src of the
+     * {@link Snapshot#NO_SNAPSHOT_ID} means no dstSnapshot (e.g., src of the
      * first-time rename).
      */
     private final int dstSnapshotId;

@@ -587,25 +588,27 @@ public abstract class INodeReference extends INode {
     }

     @Override
-    public Quota.Counts cleanSubtree(Snapshot snapshot, Snapshot prior,
+    public Quota.Counts cleanSubtree(int snapshot, int prior,
         BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes,
         final boolean countDiffChange) throws QuotaExceededException {
-      if (snapshot == null && prior == null) {
+      if (snapshot == Snapshot.CURRENT_STATE_ID
+          && prior == Snapshot.NO_SNAPSHOT_ID) {
         Quota.Counts counts = Quota.Counts.newInstance();
         this.computeQuotaUsage(counts, true);
         destroyAndCollectBlocks(collectedBlocks, removedINodes);
         return counts;
       } else {
-        // if prior is null, we need to check snapshot belonging to the previous
-        // WithName instance
-        if (prior == null) {
+        // if prior is NO_SNAPSHOT_ID, we need to check snapshot belonging to
+        // the previous WithName instance
+        if (prior == Snapshot.NO_SNAPSHOT_ID) {
           prior = getPriorSnapshot(this);
         }
-        // if prior is not null, and prior is not before the to-be-deleted
-        // snapshot, we can quit here and leave the snapshot deletion work to
-        // the src tree of rename
-        if (snapshot != null && prior != null
-            && Snapshot.ID_COMPARATOR.compare(snapshot, prior) <= 0) {
+        // if prior is not NO_SNAPSHOT_ID, and prior is not before the
+        // to-be-deleted snapshot, we can quit here and leave the snapshot
+        // deletion work to the src tree of rename
+        if (snapshot != Snapshot.CURRENT_STATE_ID
+            && prior != Snapshot.NO_SNAPSHOT_ID
+            && Snapshot.ID_INTEGER_COMPARATOR.compare(snapshot, prior) <= 0) {
          return Quota.Counts.newInstance();
        }
        return getReferredINode().cleanSubtree(snapshot, prior,

@@ -632,12 +635,12 @@ public abstract class INodeReference extends INode {
       } else {
         // we will clean everything, including files, directories, and
         // snapshots, that were created after this prior snapshot
-        Snapshot prior = getPriorSnapshot(this);
+        int prior = getPriorSnapshot(this);
         // prior must be non-null, otherwise we do not have any previous
         // WithName nodes, and the reference number will be 0.
-        Preconditions.checkState(prior != null);
+        Preconditions.checkState(prior != Snapshot.NO_SNAPSHOT_ID);
         // identify the snapshot created after prior
-        Snapshot snapshot = getSelfSnapshot(prior);
+        int snapshot = getSelfSnapshot(prior);
         INode referred = getReferredINode().asReference().getReferredINode();
         if (referred.isFile()) {

@@ -671,23 +674,23 @@ public abstract class INodeReference extends INode {
       }
     }

-    private Snapshot getSelfSnapshot(final Snapshot prior) {
+    private int getSelfSnapshot(final int prior) {
       WithCount wc = (WithCount) getReferredINode().asReference();
       INode referred = wc.getReferredINode();
-      Snapshot lastSnapshot = null;
+      int lastSnapshot = Snapshot.CURRENT_STATE_ID;
       if (referred.isFile() && referred.asFile().isWithSnapshot()) {
-        lastSnapshot = referred.asFile().getDiffs().getLastSnapshot();
+        lastSnapshot = referred.asFile().getDiffs().getLastSnapshotId();
       } else if (referred.isDirectory()) {
         DirectoryWithSnapshotFeature sf = referred.asDirectory()
             .getDirectoryWithSnapshotFeature();
         if (sf != null) {
-          lastSnapshot = sf.getLastSnapshot();
+          lastSnapshot = sf.getLastSnapshotId();
         }
       }
-      if (lastSnapshot != null && !lastSnapshot.equals(prior)) {
+      if (lastSnapshot != Snapshot.CURRENT_STATE_ID && lastSnapshot != prior) {
         return lastSnapshot;
       } else {
-        return null;
+        return Snapshot.CURRENT_STATE_ID;
       }
     }
   }
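
With getPriorSnapshot and getSelfSnapshot returning sentinel ints instead of nullable Snapshot objects, comparisons such as snapshot.getId() <= prior.getId() collapse to plain int comparisons. A small runnable sketch of the null-to-sentinel conversion, using stand-in types rather than the real INodeReference machinery:

// Hypothetical illustration: a prior-snapshot lookup that reports
// "no prior" through a sentinel int instead of a null Snapshot.
import java.util.ArrayDeque;
import java.util.Deque;

class PriorLookupSketch {
  static final int NO_SNAPSHOT_ID = -1; // assumed sentinel value

  // Before: returned a nullable Snapshot; callers compared getId() values.
  // After: returns an int, with NO_SNAPSHOT_ID standing in for null.
  static int getPrior(Deque<Integer> snapshotIds) {
    Integer prior = snapshotIds.peekLast();
    return prior == null ? NO_SNAPSHOT_ID : prior;
  }

  public static void main(String[] args) {
    Deque<Integer> ids = new ArrayDeque<>();
    System.out.println(getPrior(ids));          // -1, i.e. NO_SNAPSHOT_ID
    ids.add(3);
    ids.add(7);
    int prior = getPrior(ids);
    int snapshot = 5;
    // was: prior != null && snapshot.getId() <= prior.getId()
    System.out.println(prior != NO_SNAPSHOT_ID && snapshot <= prior); // true
  }
}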

@@ -45,10 +45,10 @@ public class INodeSymlink extends INodeWithAdditionalFields {
   }

   @Override
-  INode recordModification(Snapshot latest) throws QuotaExceededException {
-    if (isInLatestSnapshot(latest)) {
+  INode recordModification(int latestSnapshotId) throws QuotaExceededException {
+    if (isInLatestSnapshot(latestSnapshotId)) {
       INodeDirectory parent = getParent();
-      parent.saveChild2Snapshot(this, latest, new INodeSymlink(this));
+      parent.saveChild2Snapshot(this, latestSnapshotId, new INodeSymlink(this));
     }
     return this;
   }

@@ -74,10 +74,11 @@ public class INodeSymlink extends INodeWithAdditionalFields {
   }

   @Override
-  public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
+  public Quota.Counts cleanSubtree(final int snapshotId, int priorSnapshotId,
       final BlocksMapUpdateInfo collectedBlocks,
       final List<INode> removedINodes, final boolean countDiffChange) {
-    if (snapshot == null && prior == null) {
+    if (snapshotId == Snapshot.CURRENT_STATE_ID
+        && priorSnapshotId == Snapshot.NO_SNAPSHOT_ID) {
       destroyAndCollectBlocks(collectedBlocks, removedINodes);
     }
     return Quota.Counts.newInstance(1, 0);

@@ -105,7 +106,7 @@ public class INodeSymlink extends INodeWithAdditionalFields {
   @Override
   public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
-      final Snapshot snapshot) {
+      final int snapshot) {
     super.dumpTreeRecursively(out, prefix, snapshot);
     out.println();
   }

@@ -154,9 +154,9 @@ public abstract class INodeWithAdditionalFields extends INode
   }

   @Override
-  final PermissionStatus getPermissionStatus(Snapshot snapshot) {
-    return new PermissionStatus(getUserName(snapshot), getGroupName(snapshot),
-        getFsPermission(snapshot));
+  final PermissionStatus getPermissionStatus(int snapshotId) {
+    return new PermissionStatus(getUserName(snapshotId), getGroupName(snapshotId),
+        getFsPermission(snapshotId));
   }

   private final void updatePermissionStatus(PermissionStatusFormat f, long n) {

@@ -164,9 +164,9 @@ public abstract class INodeWithAdditionalFields extends INode
   }

   @Override
-  final String getUserName(Snapshot snapshot) {
-    if (snapshot != null) {
-      return getSnapshotINode(snapshot).getUserName();
+  final String getUserName(int snapshotId) {
+    if (snapshotId != Snapshot.CURRENT_STATE_ID) {
+      return getSnapshotINode(snapshotId).getUserName();
     }

     int n = (int)PermissionStatusFormat.USER.retrieve(permission);

@@ -180,9 +180,9 @@ public abstract class INodeWithAdditionalFields extends INode
   }

   @Override
-  final String getGroupName(Snapshot snapshot) {
-    if (snapshot != null) {
-      return getSnapshotINode(snapshot).getGroupName();
+  final String getGroupName(int snapshotId) {
+    if (snapshotId != Snapshot.CURRENT_STATE_ID) {
+      return getSnapshotINode(snapshotId).getGroupName();
     }

     int n = (int)PermissionStatusFormat.GROUP.retrieve(permission);

@@ -196,9 +196,9 @@ public abstract class INodeWithAdditionalFields extends INode
   }

   @Override
-  final FsPermission getFsPermission(Snapshot snapshot) {
-    if (snapshot != null) {
-      return getSnapshotINode(snapshot).getFsPermission();
+  final FsPermission getFsPermission(int snapshotId) {
+    if (snapshotId != Snapshot.CURRENT_STATE_ID) {
+      return getSnapshotINode(snapshotId).getFsPermission();
     }

     return new FsPermission(getFsPermissionShort());

@@ -220,9 +220,9 @@ public abstract class INodeWithAdditionalFields extends INode
   }

   @Override
-  final long getModificationTime(Snapshot snapshot) {
-    if (snapshot != null) {
-      return getSnapshotINode(snapshot).getModificationTime();
+  final long getModificationTime(int snapshotId) {
+    if (snapshotId != Snapshot.CURRENT_STATE_ID) {
+      return getSnapshotINode(snapshotId).getModificationTime();
     }

     return this.modificationTime;

@@ -231,13 +231,13 @@ public abstract class INodeWithAdditionalFields extends INode
   /** Update modification time if it is larger than the current value. */
   @Override
-  public final INode updateModificationTime(long mtime, Snapshot latest)
+  public final INode updateModificationTime(long mtime, int latestSnapshotId)
       throws QuotaExceededException {
     Preconditions.checkState(isDirectory());
     if (mtime <= modificationTime) {
       return this;
     }
-    return setModificationTime(mtime, latest);
+    return setModificationTime(mtime, latestSnapshotId);
   }

   final void cloneModificationTime(INodeWithAdditionalFields that) {

@@ -250,11 +250,10 @@ public abstract class INodeWithAdditionalFields extends INode
   }

   @Override
-  final long getAccessTime(Snapshot snapshot) {
-    if (snapshot != null) {
-      return getSnapshotINode(snapshot).getAccessTime();
+  final long getAccessTime(int snapshotId) {
+    if (snapshotId != Snapshot.CURRENT_STATE_ID) {
+      return getSnapshotINode(snapshotId).getAccessTime();
     }
     return accessTime;
   }
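
Every getter above follows the same fallback: a real snapshot id selects the immutable snapshot copy of the attributes, while CURRENT_STATE_ID selects the live field. A self-contained illustration of that lookup; the class and constant value are stand-ins, not the HDFS types:

// Sketch of the attribute-lookup fallback implemented by the getters above.
import java.util.HashMap;
import java.util.Map;

class AttributeLookupSketch {
  static final int CURRENT_STATE_ID = Integer.MAX_VALUE - 1; // assumed value
  private String userName = "alice";                         // live state
  private final Map<Integer, String> snapshotCopies = new HashMap<>();

  String getUserName(int snapshotId) {
    if (snapshotId != CURRENT_STATE_ID) {
      // read the frozen copy, as getSnapshotINode(snapshotId) does
      return snapshotCopies.get(snapshotId);
    }
    return userName;
  }

  public static void main(String[] args) {
    AttributeLookupSketch inode = new AttributeLookupSketch();
    inode.snapshotCopies.put(1, "alice");                    // snapshot 1 taken
    inode.userName = "bob";                                  // later change
    System.out.println(inode.getUserName(1));                // alice
    System.out.println(inode.getUserName(CURRENT_STATE_ID)); // bob
  }
}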

@@ -135,8 +135,8 @@ public class INodesInPath {
       if (!isRef && isDir && dir.isWithSnapshot()) {
         //if the path is a non-snapshot path, update the latest snapshot.
         if (!existing.isSnapshot()) {
-          existing.updateLatestSnapshot(dir.getDirectoryWithSnapshotFeature()
-              .getLastSnapshot());
+          existing.updateLatestSnapshotId(dir.getDirectoryWithSnapshotFeature()
+              .getLastSnapshotId());
         }
       } else if (isRef && isDir && !lastComp) {
         // If the curNode is a reference node, need to check its dstSnapshot:

@@ -151,16 +151,17 @@ public class INodesInPath {
         //    recordModification method.
         if (!existing.isSnapshot()) {
           int dstSnapshotId = curNode.asReference().getDstSnapshotId();
-          Snapshot latest = existing.getLatestSnapshot();
-          if (latest == null ||  // no snapshot in dst tree of rename
-              dstSnapshotId >= latest.getId()) { // the above scenario
-            Snapshot lastSnapshot = null;
+          int latest = existing.getLatestSnapshotId();
+          if (latest == Snapshot.CURRENT_STATE_ID ||  // no snapshot in dst tree of rename
+              (dstSnapshotId != Snapshot.CURRENT_STATE_ID &&
+              dstSnapshotId >= latest)) { // the above scenario
+            int lastSnapshot = Snapshot.CURRENT_STATE_ID;
             DirectoryWithSnapshotFeature sf = null;
             if (curNode.isDirectory() &&
                 (sf = curNode.asDirectory().getDirectoryWithSnapshotFeature()) != null) {
-              lastSnapshot = sf.getLastSnapshot();
+              lastSnapshot = sf.getLastSnapshotId();
             }
-            existing.setSnapshot(lastSnapshot);
+            existing.setSnapshotId(lastSnapshot);
           }
         }
       }

@@ -206,14 +207,14 @@ public class INodesInPath {
           curNode = null;
         } else {
           curNode = s.getRoot();
-          existing.setSnapshot(s);
+          existing.setSnapshotId(s.getId());
         }
         if (index >= -1) {
           existing.snapshotRootIndex = existing.numNonNull;
         }
       } else {
         // normal case, and also for resolving file/dir under snapshot root
-        curNode = dir.getChild(childName, existing.getPathSnapshot());
+        curNode = dir.getChild(childName, existing.getPathSnapshotId());
       }
       count++;
       index++;

@@ -245,11 +246,12 @@ public class INodesInPath {
    */
   private int snapshotRootIndex;
   /**
-   * For snapshot paths, it is the reference to the snapshot; or null if the
-   * snapshot does not exist. For non-snapshot paths, it is the reference to
-   * the latest snapshot found in the path; or null if no snapshot is found.
+   * For snapshot paths, it is the id of the snapshot; or
+   * {@link Snapshot#CURRENT_STATE_ID} if the snapshot does not exist. For
+   * non-snapshot paths, it is the id of the latest snapshot found in the path;
+   * or {@link Snapshot#CURRENT_STATE_ID} if no snapshot is found.
    */
-  private Snapshot snapshot = null;
+  private int snapshotId = Snapshot.CURRENT_STATE_ID;

   private INodesInPath(byte[][] path, int number) {
     this.path = path;

@@ -262,29 +264,30 @@ public class INodesInPath {
   }

   /**
-   * For non-snapshot paths, return the latest snapshot found in the path.
-   * For snapshot paths, return null.
+   * For non-snapshot paths, return the latest snapshot id found in the path.
    */
-  public Snapshot getLatestSnapshot() {
-    return isSnapshot? null: snapshot;
+  public int getLatestSnapshotId() {
+    Preconditions.checkState(!isSnapshot);
+    return snapshotId;
   }

   /**
-   * For snapshot paths, return the snapshot specified in the path.
-   * For non-snapshot paths, return null.
+   * For snapshot paths, return the id of the snapshot specified in the path.
+   * For non-snapshot paths, return {@link Snapshot#CURRENT_STATE_ID}.
    */
-  public Snapshot getPathSnapshot() {
-    return isSnapshot? snapshot: null;
+  public int getPathSnapshotId() {
+    return isSnapshot ? snapshotId : Snapshot.CURRENT_STATE_ID;
   }

-  private void setSnapshot(Snapshot s) {
-    snapshot = s;
+  private void setSnapshotId(int sid) {
+    snapshotId = sid;
   }

-  private void updateLatestSnapshot(Snapshot s) {
-    if (snapshot == null
-        || (s != null && Snapshot.ID_COMPARATOR.compare(snapshot, s) < 0)) {
-      snapshot = s;
+  private void updateLatestSnapshotId(int sid) {
+    if (snapshotId == Snapshot.CURRENT_STATE_ID
+        || (sid != Snapshot.CURRENT_STATE_ID && Snapshot.ID_INTEGER_COMPARATOR
+            .compare(snapshotId, sid) < 0)) {
+      snapshotId = sid;
     }
   }

@@ -386,7 +389,7 @@ public class INodesInPath {
         .append("\n capacity = ").append(capacity)
         .append("\n isSnapshot = ").append(isSnapshot)
         .append("\n snapshotRootIndex = ").append(snapshotRootIndex)
-        .append("\n snapshot = ").append(snapshot);
+        .append("\n snapshotId = ").append(snapshotId);
     return b.toString();
   }
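
updateLatestSnapshotId keeps the newest snapshot id observed while resolving a path. A sketch of that logic, substituting plain int ordering for Snapshot.ID_INTEGER_COMPARATOR on the assumption that real snapshot ids are assigned in increasing order:

// Illustrative tracker mirroring updateLatestSnapshotId above.
class LatestSnapshotTracker {
  static final int CURRENT_STATE_ID = Integer.MAX_VALUE - 1; // assumed value
  private int snapshotId = CURRENT_STATE_ID;

  void updateLatestSnapshotId(int sid) {
    if (snapshotId == CURRENT_STATE_ID
        || (sid != CURRENT_STATE_ID && snapshotId < sid)) {
      snapshotId = sid;
    }
  }

  public static void main(String[] args) {
    LatestSnapshotTracker t = new LatestSnapshotTracker();
    t.updateLatestSnapshotId(4);
    t.updateLatestSnapshotId(9);
    t.updateLatestSnapshotId(2);      // ignored: older than 9
    System.out.println(t.snapshotId); // 9
  }
}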

@@ -480,6 +480,14 @@ public class NameNode implements NameNodeStatusMXBean {
    * @param conf the configuration
    */
   protected void initialize(Configuration conf) throws IOException {
+    if (conf.get(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS) == null) {
+      String intervals = conf.get(DFS_METRICS_PERCENTILES_INTERVALS_KEY);
+      if (intervals != null) {
+        conf.set(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS,
+          intervals);
+      }
+    }
+
     UserGroupInformation.setConfiguration(conf);
     loginAsNameNodeUser(conf);
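
The block added to initialize() lets the user-group metrics quantiles inherit the DFS percentiles setting when no explicit value was configured. A sketch of the same fallback pattern against java.util.Properties; the two key strings are assumptions about what the constants expand to, not taken from the patch:

// Hypothetical stand-in for the Configuration fallback above.
import java.util.Properties;

class ConfigFallbackSketch {
  // Assumed expansions of the two configuration-key constants.
  static final String UGI_KEY =
      "hadoop.user.group.metrics.percentiles.intervals";
  static final String DFS_KEY = "dfs.metrics.percentiles.intervals";

  static void applyFallback(Properties conf) {
    if (conf.getProperty(UGI_KEY) == null) {
      String intervals = conf.getProperty(DFS_KEY);
      if (intervals != null) {
        conf.setProperty(UGI_KEY, intervals);
      }
    }
  }

  public static void main(String[] args) {
    Properties conf = new Properties();
    conf.setProperty(DFS_KEY, "60,600");
    applyFallback(conf);
    System.out.println(conf.getProperty(UGI_KEY)); // 60,600
  }
}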

@@ -22,8 +22,8 @@ import java.io.IOException;
 import java.util.List;

 import org.apache.hadoop.hdfs.server.namenode.INode;
-import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
+import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;
 import org.apache.hadoop.hdfs.server.namenode.Quota;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;

@@ -52,8 +52,8 @@ abstract class AbstractINodeDiff<N extends INode,
     D extends AbstractINodeDiff<N, A, D>>
     implements Comparable<Integer> {

-  /** The snapshot will be obtained after this diff is applied. */
-  Snapshot snapshot;
+  /** The id of the corresponding snapshot. */
+  private int snapshotId;
   /** The snapshot inode data. It is null when there is no change. */
   A snapshotINode;

@@ -64,10 +64,8 @@ abstract class AbstractINodeDiff<N extends INode,
    */
   private D posteriorDiff;

-  AbstractINodeDiff(Snapshot snapshot, A snapshotINode, D posteriorDiff) {
-    Preconditions.checkNotNull(snapshot, "snapshot is null");
-    this.snapshot = snapshot;
+  AbstractINodeDiff(int snapshotId, A snapshotINode, D posteriorDiff) {
+    this.snapshotId = snapshotId;
     this.snapshotINode = snapshotINode;
     this.posteriorDiff = posteriorDiff;
   }

@@ -75,16 +73,16 @@ abstract class AbstractINodeDiff<N extends INode,
   /** Compare diffs with snapshot ID. */
   @Override
   public final int compareTo(final Integer that) {
-    return Snapshot.ID_INTEGER_COMPARATOR.compare(this.snapshot.getId(), that);
+    return Snapshot.ID_INTEGER_COMPARATOR.compare(this.snapshotId, that);
   }

   /** @return the snapshot object of this diff. */
-  public final Snapshot getSnapshot() {
-    return snapshot;
+  public final int getSnapshotId() {
+    return snapshotId;
   }

-  final void setSnapshot(Snapshot snapshot) {
-    this.snapshot = snapshot;
+  final void setSnapshotId(int snapshot) {
+    this.snapshotId = snapshot;
   }

   /** @return the posterior diff. */

@@ -132,13 +130,12 @@ abstract class AbstractINodeDiff<N extends INode,
   @Override
   public String toString() {
-    return getClass().getSimpleName() + ": " + snapshot + " (post="
-        + (posteriorDiff == null? null: posteriorDiff.snapshot) + ")";
+    return getClass().getSimpleName() + ": " + this.getSnapshotId() + " (post="
+        + (posteriorDiff == null? null: posteriorDiff.getSnapshotId()) + ")";
   }

   void writeSnapshot(DataOutput out) throws IOException {
-    // Assume the snapshot is recorded before, write id only.
-    out.writeInt(snapshot.getId());
+    out.writeInt(snapshotId);
   }

   abstract void write(DataOutput out, ReferenceMap referenceMap
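
Because each diff now implements Comparable<Integer> over its snapshot id, a sorted diff list can be probed directly with Collections.binarySearch, as the diff-list code below does. A self-contained sketch with illustrative class names:

// Sketch: a list sorted by snapshot id, searchable by Integer key
// because elements compare themselves to an Integer (as compareTo above).
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

class DiffSketch implements Comparable<Integer> {
  final int snapshotId;

  DiffSketch(int snapshotId) {
    this.snapshotId = snapshotId;
  }

  @Override
  public int compareTo(Integer thatId) {
    return Integer.compare(this.snapshotId, thatId);
  }

  public static void main(String[] args) {
    List<DiffSketch> diffs = new ArrayList<>();
    for (int id : new int[] {2, 5, 9}) {
      diffs.add(new DiffSketch(id));
    }
    System.out.println(Collections.binarySearch(diffs, 5)); // 1  (exact hit)
    System.out.println(Collections.binarySearch(diffs, 6)); // -3 (miss)
  }
}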

@@ -53,7 +53,7 @@ abstract class AbstractINodeDiffList<N extends INode,
   }

   /** @return an {@link AbstractINodeDiff}. */
-  abstract D createDiff(Snapshot snapshot, N currentINode);
+  abstract D createDiff(int snapshotId, N currentINode);

   /** @return a snapshot copy of the current inode. */
   abstract A createSnapshotCopy(N currentINode);

@@ -63,25 +63,25 @@ abstract class AbstractINodeDiffList<N extends INode,
    * outside. If the diff to remove is not the first one in the diff list, we
    * need to combine the diff with its previous one.
    *
-   * @param snapshot The snapshot to be deleted
-   * @param prior The snapshot taken before the to-be-deleted snapshot
+   * @param snapshot The id of the snapshot to be deleted
+   * @param prior The id of the snapshot taken before the to-be-deleted snapshot
    * @param collectedBlocks Used to collect information for blocksMap update
    * @return delta in namespace.
    */
-  public final Quota.Counts deleteSnapshotDiff(final Snapshot snapshot,
-      Snapshot prior, final N currentINode,
+  public final Quota.Counts deleteSnapshotDiff(final int snapshot,
+      final int prior, final N currentINode,
       final BlocksMapUpdateInfo collectedBlocks,
       final List<INode> removedINodes, boolean countDiffChange)
       throws QuotaExceededException {
-    int snapshotIndex = Collections.binarySearch(diffs, snapshot.getId());
+    int snapshotIndex = Collections.binarySearch(diffs, snapshot);

     Quota.Counts counts = Quota.Counts.newInstance();
     D removed = null;
     if (snapshotIndex == 0) {
-      if (prior != null) {
+      if (prior != Snapshot.NO_SNAPSHOT_ID) { // there is still snapshot before
         // set the snapshot to latestBefore
-        diffs.get(snapshotIndex).setSnapshot(prior);
-      } else {
+        diffs.get(snapshotIndex).setSnapshotId(prior);
+      } else { // there is no snapshot before
         removed = diffs.remove(0);
         if (countDiffChange) {
           counts.add(Quota.NAMESPACE, 1);

@@ -96,8 +96,8 @@ abstract class AbstractINodeDiffList<N extends INode,
       }
     } else if (snapshotIndex > 0) {
       final AbstractINodeDiff<N, A, D> previous = diffs.get(snapshotIndex - 1);
-      if (!previous.getSnapshot().equals(prior)) {
-        diffs.get(snapshotIndex).setSnapshot(prior);
+      if (previous.getSnapshotId() != prior) {
+        diffs.get(snapshotIndex).setSnapshotId(prior);
       } else {
         // combine the to-be-removed diff with its previous diff
         removed = diffs.remove(snapshotIndex);

@@ -120,10 +120,10 @@ abstract class AbstractINodeDiffList<N extends INode,
   }

   /** Add an {@link AbstractINodeDiff} for the given snapshot. */
-  final D addDiff(Snapshot latest, N currentINode)
+  final D addDiff(int latestSnapshotId, N currentINode)
       throws QuotaExceededException {
     currentINode.addSpaceConsumed(1, 0, true);
-    return addLast(createDiff(latest, currentINode));
+    return addLast(createDiff(latestSnapshotId, currentINode));
   }

   /** Append the diff at the end of the list. */

@@ -149,10 +149,10 @@ abstract class AbstractINodeDiffList<N extends INode,
     return n == 0? null: diffs.get(n - 1);
   }

-  /** @return the last snapshot. */
-  public final Snapshot getLastSnapshot() {
+  /** @return the id of the last snapshot. */
+  public final int getLastSnapshotId() {
     final AbstractINodeDiff<N, A, D> last = getLast();
-    return last == null? null: last.getSnapshot();
+    return last == null ? Snapshot.CURRENT_STATE_ID : last.getSnapshotId();
   }

   /**

@@ -161,60 +161,49 @@ abstract class AbstractINodeDiffList<N extends INode,
    * snapshot id.
    * @param exclusive True means the returned snapshot's id must be < the given
    *                  id, otherwise <=.
-   * @return The latest snapshot before the given snapshot.
+   * @return The id of the latest snapshot before the given snapshot.
    */
-  private final Snapshot getPrior(int anchorId, boolean exclusive) {
-    if (anchorId == Snapshot.INVALID_ID) {
-      return getLastSnapshot();
+  private final int getPrior(int anchorId, boolean exclusive) {
+    if (anchorId == Snapshot.CURRENT_STATE_ID) {
+      return getLastSnapshotId();
     }
     final int i = Collections.binarySearch(diffs, anchorId);
     if (exclusive) { // must be the one before
       if (i == -1 || i == 0) {
-        return null;
+        return Snapshot.NO_SNAPSHOT_ID;
       } else {
         int priorIndex = i > 0 ? i - 1 : -i - 2;
-        return diffs.get(priorIndex).getSnapshot();
+        return diffs.get(priorIndex).getSnapshotId();
       }
     } else { // the one, or the one before if not existing
       if (i >= 0) {
-        return diffs.get(i).getSnapshot();
+        return diffs.get(i).getSnapshotId();
       } else if (i < -1) {
-        return diffs.get(-i - 2).getSnapshot();
+        return diffs.get(-i - 2).getSnapshotId();
       } else { // i == -1
-        return null;
+        return Snapshot.NO_SNAPSHOT_ID;
      }
    }
  }

-  public final Snapshot getPrior(int snapshotId) {
+  public final int getPrior(int snapshotId) {
     return getPrior(snapshotId, false);
   }

   /**
    * Update the prior snapshot.
    */
-  final Snapshot updatePrior(Snapshot snapshot, Snapshot prior) {
-    int id = snapshot == null ? Snapshot.INVALID_ID : snapshot.getId();
-    Snapshot s = getPrior(id, true);
-    if (s != null &&
-        (prior == null || Snapshot.ID_COMPARATOR.compare(s, prior) > 0)) {
-      return s;
+  final int updatePrior(int snapshot, int prior) {
+    int p = getPrior(snapshot, true);
+    if (p != Snapshot.CURRENT_STATE_ID
+        && Snapshot.ID_INTEGER_COMPARATOR.compare(p, prior) > 0) {
+      return p;
     }
     return prior;
   }

-  /**
-   * @return the diff corresponding to the given snapshot.
-   *         When the diff is null, it means that the current state and
-   *         the corresponding snapshot state are the same.
-   */
-  public final D getDiff(Snapshot snapshot) {
-    return getDiffById(snapshot == null ?
-        Snapshot.INVALID_ID : snapshot.getId());
-  }
-
-  private final D getDiffById(final int snapshotId) {
-    if (snapshotId == Snapshot.INVALID_ID) {
+  public final D getDiffById(final int snapshotId) {
+    if (snapshotId == Snapshot.CURRENT_STATE_ID) {
       return null;
     }
     final int i = Collections.binarySearch(diffs, snapshotId);

@@ -234,9 +223,9 @@ abstract class AbstractINodeDiffList<N extends INode,
    * Search for the snapshot whose id is 1) no less than the given id,
    * and 2) most close to the given id.
    */
-  public final Snapshot getSnapshotById(final int snapshotId) {
+  public final int getSnapshotById(final int snapshotId) {
     D diff = getDiffById(snapshotId);
-    return diff == null ? null : diff.getSnapshot();
+    return diff == null ? Snapshot.CURRENT_STATE_ID : diff.getSnapshotId();
   }

   /**

@@ -271,8 +260,8 @@ abstract class AbstractINodeDiffList<N extends INode,
    * Note that the current inode is returned if there is no change
    * between the given snapshot and the current state.
    */
-  public A getSnapshotINode(final Snapshot snapshot, final A currentINode) {
-    final D diff = getDiff(snapshot);
+  public A getSnapshotINode(final int snapshotId, final A currentINode) {
+    final D diff = getDiffById(snapshotId);
     final A inode = diff == null? null: diff.getSnapshotINode();
     return inode == null? currentINode: inode;
   }

@@ -281,15 +270,16 @@ abstract class AbstractINodeDiffList<N extends INode,
   /**
    * Check if the latest snapshot diff exists. If not, add it.
    * @return the latest snapshot diff, which is never null.
    */
-  final D checkAndAddLatestSnapshotDiff(Snapshot latest, N currentINode)
+  final D checkAndAddLatestSnapshotDiff(int latestSnapshotId, N currentINode)
       throws QuotaExceededException {
     final D last = getLast();
     if (last != null
-        && Snapshot.ID_COMPARATOR.compare(last.getSnapshot(), latest) >= 0) {
+        && Snapshot.ID_INTEGER_COMPARATOR.compare(last.getSnapshotId(),
+            latestSnapshotId) >= 0) {
       return last;
     } else {
       try {
-        return addDiff(latest, currentINode);
+        return addDiff(latestSnapshotId, currentINode);
       } catch(NSQuotaExceededException e) {
         e.setMessagePrefix("Failed to record modification for snapshot");
         throw e;

@@ -298,10 +288,10 @@ abstract class AbstractINodeDiffList<N extends INode,
   }

   /** Save the snapshot copy to the latest snapshot. */
-  public void saveSelf2Snapshot(Snapshot latest, N currentINode, A snapshotCopy)
-      throws QuotaExceededException {
-    if (latest != null) {
-      D diff = checkAndAddLatestSnapshotDiff(latest, currentINode);
+  public void saveSelf2Snapshot(int latestSnapshotId, N currentINode,
+      A snapshotCopy) throws QuotaExceededException {
+    if (latestSnapshotId != Snapshot.CURRENT_STATE_ID) {
+      D diff = checkAndAddLatestSnapshotDiff(latestSnapshotId, currentINode);
       if (diff.snapshotINode == null) {
         if (snapshotCopy == null) {
           snapshotCopy = createSnapshotCopy(currentINode);
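
The index arithmetic in getPrior(anchorId, exclusive) leans on Collections.binarySearch returning -(insertionPoint) - 1 on a miss, so the entry just before the insertion point sits at index -i - 2. A runnable sketch of the exclusive lookup; the sentinel value is an assumption:

// Illustration of the prior-snapshot index arithmetic used above.
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

class PriorIndexSketch {
  static final int NO_SNAPSHOT_ID = -1; // assumed sentinel value

  static int priorExclusive(List<Integer> ids, int anchorId) {
    int i = Collections.binarySearch(ids, anchorId);
    if (i == -1 || i == 0) {
      return NO_SNAPSHOT_ID;        // nothing strictly before the anchor
    }
    int priorIndex = i > 0 ? i - 1 : -i - 2;
    return ids.get(priorIndex);
  }

  public static void main(String[] args) {
    List<Integer> ids = Arrays.asList(2, 5, 9);
    System.out.println(priorExclusive(ids, 5)); // 2 (hit: step back one)
    System.out.println(priorExclusive(ids, 7)); // 5 (miss: index -i - 2)
    System.out.println(priorExclusive(ids, 2)); // -1 (nothing before)
  }
}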

@ -225,30 +225,36 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
private final int childrenSize; private final int childrenSize;
/** The children list diff. */ /** The children list diff. */
private final ChildrenDiff diff; private final ChildrenDiff diff;
private boolean isSnapshotRoot = false;
private DirectoryDiff(int snapshotId, INodeDirectory dir) {
super(snapshotId, null, null);
private DirectoryDiff(Snapshot snapshot, INodeDirectory dir) { this.childrenSize = dir.getChildrenList(Snapshot.CURRENT_STATE_ID).size();
super(snapshot, null, null);
this.childrenSize = dir.getChildrenList(null).size();
this.diff = new ChildrenDiff(); this.diff = new ChildrenDiff();
} }
/** Constructor used by FSImage loading */ /** Constructor used by FSImage loading */
DirectoryDiff(Snapshot snapshot, INodeDirectoryAttributes snapshotINode, DirectoryDiff(int snapshotId, INodeDirectoryAttributes snapshotINode,
DirectoryDiff posteriorDiff, int childrenSize, DirectoryDiff posteriorDiff, int childrenSize, List<INode> createdList,
List<INode> createdList, List<INode> deletedList) { List<INode> deletedList, boolean isSnapshotRoot) {
super(snapshot, snapshotINode, posteriorDiff); super(snapshotId, snapshotINode, posteriorDiff);
this.childrenSize = childrenSize; this.childrenSize = childrenSize;
this.diff = new ChildrenDiff(createdList, deletedList); this.diff = new ChildrenDiff(createdList, deletedList);
this.isSnapshotRoot = isSnapshotRoot;
} }
ChildrenDiff getChildrenDiff() { ChildrenDiff getChildrenDiff() {
return diff; return diff;
} }
/** Is the inode the root of the snapshot? */ void setSnapshotRoot(INodeDirectoryAttributes root) {
this.snapshotINode = root;
this.isSnapshotRoot = true;
}
boolean isSnapshotRoot() { boolean isSnapshotRoot() {
return snapshotINode == snapshot.getRoot(); return isSnapshotRoot;
} }
@Override @Override
@ -287,7 +293,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
combined.combinePosterior(d.diff, null); combined.combinePosterior(d.diff, null);
} }
children = combined.apply2Current(ReadOnlyList.Util.asList( children = combined.apply2Current(ReadOnlyList.Util.asList(
currentDir.getChildrenList(null))); currentDir.getChildrenList(Snapshot.CURRENT_STATE_ID)));
} }
return children; return children;
} }
@ -327,7 +333,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
return null; return null;
} else if (d.getPosterior() == null) { } else if (d.getPosterior() == null) {
// no more posterior diff, get from current inode. // no more posterior diff, get from current inode.
return currentDir.getChild(name, null); return currentDir.getChild(name, Snapshot.CURRENT_STATE_ID);
} }
} }
} }
@ -342,11 +348,9 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
writeSnapshot(out); writeSnapshot(out);
out.writeInt(childrenSize); out.writeInt(childrenSize);
// write snapshotINode // Write snapshotINode
if (isSnapshotRoot()) { out.writeBoolean(isSnapshotRoot);
out.writeBoolean(true); if (!isSnapshotRoot) {
} else {
out.writeBoolean(false);
if (snapshotINode != null) { if (snapshotINode != null) {
out.writeBoolean(true); out.writeBoolean(true);
FSImageSerialization.writeINodeDirectoryAttributes(snapshotINode, out); FSImageSerialization.writeINodeDirectoryAttributes(snapshotINode, out);
@ -373,7 +377,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
extends AbstractINodeDiffList<INodeDirectory, INodeDirectoryAttributes, DirectoryDiff> { extends AbstractINodeDiffList<INodeDirectory, INodeDirectoryAttributes, DirectoryDiff> {
@Override @Override
DirectoryDiff createDiff(Snapshot snapshot, INodeDirectory currentDir) { DirectoryDiff createDiff(int snapshot, INodeDirectory currentDir) {
return new DirectoryDiff(snapshot, currentDir); return new DirectoryDiff(snapshot, currentDir);
} }
@ -424,12 +428,13 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
/** /**
* Destroy a subtree under a DstReference node. * Destroy a subtree under a DstReference node.
*/ */
public static void destroyDstSubtree(INode inode, final Snapshot snapshot, public static void destroyDstSubtree(INode inode, final int snapshot,
final Snapshot prior, final BlocksMapUpdateInfo collectedBlocks, final int prior, final BlocksMapUpdateInfo collectedBlocks,
final List<INode> removedINodes) throws QuotaExceededException { final List<INode> removedINodes) throws QuotaExceededException {
Preconditions.checkArgument(prior != null); Preconditions.checkArgument(prior != Snapshot.NO_SNAPSHOT_ID);
if (inode.isReference()) { if (inode.isReference()) {
if (inode instanceof INodeReference.WithName && snapshot != null) { if (inode instanceof INodeReference.WithName
&& snapshot != Snapshot.CURRENT_STATE_ID) {
// this inode has been renamed before the deletion of the DstReference // this inode has been renamed before the deletion of the DstReference
// subtree // subtree
inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes, inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes,
@ -447,18 +452,18 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature(); DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
if (sf != null) { if (sf != null) {
DirectoryDiffList diffList = sf.getDiffs(); DirectoryDiffList diffList = sf.getDiffs();
DirectoryDiff priorDiff = diffList.getDiff(prior); DirectoryDiff priorDiff = diffList.getDiffById(prior);
if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) { if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
List<INode> dList = priorDiff.diff.getList(ListType.DELETED); List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
excludedNodes = cloneDiffList(dList); excludedNodes = cloneDiffList(dList);
} }
if (snapshot != null) { if (snapshot != Snapshot.CURRENT_STATE_ID) {
diffList.deleteSnapshotDiff(snapshot, prior, dir, collectedBlocks, diffList.deleteSnapshotDiff(snapshot, prior, dir, collectedBlocks,
removedINodes, true); removedINodes, true);
} }
priorDiff = diffList.getDiff(prior); priorDiff = diffList.getDiffById(prior);
if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) { if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
priorDiff.diff.destroyCreatedList(dir, collectedBlocks, priorDiff.diff.destroyCreatedList(dir, collectedBlocks,
removedINodes); removedINodes);
} }
@ -478,14 +483,14 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
* deleted list of prior. * deleted list of prior.
* @param inode The inode to clean. * @param inode The inode to clean.
* @param post The post snapshot. * @param post The post snapshot.
* @param prior The prior snapshot. * @param prior The id of the prior snapshot.
* @param collectedBlocks Used to collect blocks for later deletion. * @param collectedBlocks Used to collect blocks for later deletion.
* @return Quota usage update. * @return Quota usage update.
*/ */
private static Quota.Counts cleanDeletedINode(INode inode, private static Quota.Counts cleanDeletedINode(INode inode,
final Snapshot post, final Snapshot prior, final int post, final int prior,
final BlocksMapUpdateInfo collectedBlocks, final BlocksMapUpdateInfo collectedBlocks,
final List<INode> removedINodes, final boolean countDiffChange) final List<INode> removedINodes, final boolean countDiffChange)
throws QuotaExceededException { throws QuotaExceededException {
Quota.Counts counts = Quota.Counts.newInstance(); Quota.Counts counts = Quota.Counts.newInstance();
Deque<INode> queue = new ArrayDeque<INode>(); Deque<INode> queue = new ArrayDeque<INode>();
@ -494,7 +499,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
INode topNode = queue.pollFirst(); INode topNode = queue.pollFirst();
if (topNode instanceof INodeReference.WithName) { if (topNode instanceof INodeReference.WithName) {
INodeReference.WithName wn = (INodeReference.WithName) topNode; INodeReference.WithName wn = (INodeReference.WithName) topNode;
if (wn.getLastSnapshotId() >= post.getId()) { if (wn.getLastSnapshotId() >= post) {
wn.cleanSubtree(post, prior, collectedBlocks, removedINodes, wn.cleanSubtree(post, prior, collectedBlocks, removedINodes,
countDiffChange); countDiffChange);
} }
@ -511,8 +516,8 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
if (sf != null) { if (sf != null) {
// delete files/dirs created after prior. Note that these // delete files/dirs created after prior. Note that these
// files/dirs, along with inode, were deleted right after post. // files/dirs, along with inode, were deleted right after post.
DirectoryDiff priorDiff = sf.getDiffs().getDiff(prior); DirectoryDiff priorDiff = sf.getDiffs().getDiffById(prior);
if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) { if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
priorChildrenDiff = priorDiff.getChildrenDiff(); priorChildrenDiff = priorDiff.getChildrenDiff();
counts.add(priorChildrenDiff.destroyCreatedList(dir, counts.add(priorChildrenDiff.destroyCreatedList(dir,
collectedBlocks, removedINodes)); collectedBlocks, removedINodes));
@ -540,8 +545,8 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
} }
/** @return the last snapshot. */ /** @return the last snapshot. */
public Snapshot getLastSnapshot() { public int getLastSnapshotId() {
return diffs.getLastSnapshot(); return diffs.getLastSnapshotId();
} }
/** @return the snapshot diff list. */ /** @return the snapshot diff list. */
@ -565,11 +570,13 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
* to make sure that parent is in the given snapshot "latest". * to make sure that parent is in the given snapshot "latest".
*/ */
public boolean addChild(INodeDirectory parent, INode inode, public boolean addChild(INodeDirectory parent, INode inode,
boolean setModTime, Snapshot latest) throws QuotaExceededException { boolean setModTime, int latestSnapshotId) throws QuotaExceededException {
ChildrenDiff diff = diffs.checkAndAddLatestSnapshotDiff(latest, parent).diff; ChildrenDiff diff = diffs.checkAndAddLatestSnapshotDiff(latestSnapshotId,
parent).diff;
int undoInfo = diff.create(inode); int undoInfo = diff.create(inode);
final boolean added = parent.addChild(inode, setModTime, null); final boolean added = parent.addChild(inode, setModTime,
Snapshot.CURRENT_STATE_ID);
if (!added) { if (!added) {
diff.undoCreate(inode, undoInfo); diff.undoCreate(inode, undoInfo);
} }
@ -581,7 +588,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
* needs to make sure that parent is in the given snapshot "latest". * needs to make sure that parent is in the given snapshot "latest".
*/ */
public boolean removeChild(INodeDirectory parent, INode child, public boolean removeChild(INodeDirectory parent, INode child,
Snapshot latest) throws QuotaExceededException { int latestSnapshotId) throws QuotaExceededException {
// For a directory that is not a renamed node, if isInLatestSnapshot returns // For a directory that is not a renamed node, if isInLatestSnapshot returns
// false, the directory is not in the latest snapshot, thus we do not need // false, the directory is not in the latest snapshot, thus we do not need
// to record the removed child in any snapshot. // to record the removed child in any snapshot.
@ -593,7 +600,8 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
// directory node cannot be in any snapshot (not in current tree, nor in // directory node cannot be in any snapshot (not in current tree, nor in
// previous src tree). Thus we do not need to record the removed child in // previous src tree). Thus we do not need to record the removed child in
// any snapshot. // any snapshot.
ChildrenDiff diff = diffs.checkAndAddLatestSnapshotDiff(latest, parent).diff; ChildrenDiff diff = diffs.checkAndAddLatestSnapshotDiff(latestSnapshotId,
parent).diff;
UndoInfo<INode> undoInfo = diff.delete(child); UndoInfo<INode> undoInfo = diff.delete(child);
final boolean removed = parent.removeChild(child); final boolean removed = parent.removeChild(child);
@ -611,29 +619,29 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
* for the snapshot and return it. * for the snapshot and return it.
*/ */
public ReadOnlyList<INode> getChildrenList(INodeDirectory currentINode, public ReadOnlyList<INode> getChildrenList(INodeDirectory currentINode,
final Snapshot snapshot) { final int snapshotId) {
final DirectoryDiff diff = diffs.getDiff(snapshot); final DirectoryDiff diff = diffs.getDiffById(snapshotId);
return diff != null ? diff.getChildrenList(currentINode) : currentINode return diff != null ? diff.getChildrenList(currentINode) : currentINode
.getChildrenList(null); .getChildrenList(Snapshot.CURRENT_STATE_ID);
} }
public INode getChild(INodeDirectory currentINode, byte[] name, public INode getChild(INodeDirectory currentINode, byte[] name,
Snapshot snapshot) { int snapshotId) {
final DirectoryDiff diff = diffs.getDiff(snapshot); final DirectoryDiff diff = diffs.getDiffById(snapshotId);
return diff != null ? diff.getChild(name, true, currentINode) return diff != null ? diff.getChild(name, true, currentINode)
: currentINode.getChild(name, null); : currentINode.getChild(name, Snapshot.CURRENT_STATE_ID);
} }
/** Used to record the modification of a symlink node */ /** Used to record the modification of a symlink node */
public INode saveChild2Snapshot(INodeDirectory currentINode, public INode saveChild2Snapshot(INodeDirectory currentINode,
final INode child, final Snapshot latest, final INode snapshotCopy) final INode child, final int latestSnapshotId, final INode snapshotCopy)
throws QuotaExceededException { throws QuotaExceededException {
Preconditions.checkArgument(!child.isDirectory(), Preconditions.checkArgument(!child.isDirectory(),
"child is a directory, child=%s", child); "child is a directory, child=%s", child);
Preconditions.checkArgument(latest != null); Preconditions.checkArgument(latestSnapshotId != Snapshot.CURRENT_STATE_ID);
final DirectoryDiff diff = diffs.checkAndAddLatestSnapshotDiff(latest, final DirectoryDiff diff = diffs.checkAndAddLatestSnapshotDiff(
currentINode); latestSnapshotId, currentINode);
if (diff.getChild(child.getLocalNameBytes(), false, currentINode) != null) { if (diff.getChild(child.getLocalNameBytes(), false, currentINode) != null) {
// it was already saved in the latest snapshot earlier. // it was already saved in the latest snapshot earlier.
return child; return child;
@ -656,7 +664,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
public Quota.Counts computeQuotaUsage4CurrentDirectory(Quota.Counts counts) { public Quota.Counts computeQuotaUsage4CurrentDirectory(Quota.Counts counts) {
for(DirectoryDiff d : diffs) { for(DirectoryDiff d : diffs) {
for(INode deleted : d.getChildrenDiff().getList(ListType.DELETED)) { for(INode deleted : d.getChildrenDiff().getList(ListType.DELETED)) {
deleted.computeQuotaUsage(counts, false, Snapshot.INVALID_ID); deleted.computeQuotaUsage(counts, false, Snapshot.CURRENT_STATE_ID);
} }
} }
counts.add(Quota.NAMESPACE, diffs.asList().size()); counts.add(Quota.NAMESPACE, diffs.asList().size());
@ -744,14 +752,14 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
} }
public Quota.Counts cleanDirectory(final INodeDirectory currentINode, public Quota.Counts cleanDirectory(final INodeDirectory currentINode,
final Snapshot snapshot, Snapshot prior, final int snapshot, int prior,
final BlocksMapUpdateInfo collectedBlocks, final BlocksMapUpdateInfo collectedBlocks,
final List<INode> removedINodes, final boolean countDiffChange) final List<INode> removedINodes, final boolean countDiffChange)
throws QuotaExceededException { throws QuotaExceededException {
Quota.Counts counts = Quota.Counts.newInstance(); Quota.Counts counts = Quota.Counts.newInstance();
Map<INode, INode> priorCreated = null; Map<INode, INode> priorCreated = null;
Map<INode, INode> priorDeleted = null; Map<INode, INode> priorDeleted = null;
if (snapshot == null) { // delete the current directory if (snapshot == Snapshot.CURRENT_STATE_ID) { // delete the current directory
currentINode.recordModification(prior); currentINode.recordModification(prior);
// delete everything in created list // delete everything in created list
DirectoryDiff lastDiff = diffs.getLast(); DirectoryDiff lastDiff = diffs.getLast();
@ -764,9 +772,9 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
prior = getDiffs().updatePrior(snapshot, prior); prior = getDiffs().updatePrior(snapshot, prior);
// if there is a snapshot diff associated with prior, we need to record // if there is a snapshot diff associated with prior, we need to record
// its original created and deleted list before deleting post // its original created and deleted list before deleting post
if (prior != null) { if (prior != Snapshot.NO_SNAPSHOT_ID) {
DirectoryDiff priorDiff = this.getDiffs().getDiff(prior); DirectoryDiff priorDiff = this.getDiffs().getDiffById(prior);
if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) { if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
List<INode> cList = priorDiff.diff.getList(ListType.CREATED); List<INode> cList = priorDiff.diff.getList(ListType.CREATED);
List<INode> dList = priorDiff.diff.getList(ListType.DELETED); List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
priorCreated = cloneDiffList(cList); priorCreated = cloneDiffList(cList);
@ -774,13 +782,13 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
} }
} }
counts.add(getDiffs().deleteSnapshotDiff(snapshot, prior, currentINode, counts.add(getDiffs().deleteSnapshotDiff(snapshot, prior,
collectedBlocks, removedINodes, countDiffChange)); currentINode, collectedBlocks, removedINodes, countDiffChange));
// check priorDiff again since it may have been created during the diff deletion // check priorDiff again since it may have been created during the diff deletion
if (prior != null) { if (prior != Snapshot.NO_SNAPSHOT_ID) {
DirectoryDiff priorDiff = this.getDiffs().getDiff(prior); DirectoryDiff priorDiff = this.getDiffs().getDiffById(prior);
if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) { if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
// For files/directories created between "prior" and "snapshot", // For files/directories created between "prior" and "snapshot",
// we need to clear snapshot copies for "snapshot". Note that we must // we need to clear snapshot copies for "snapshot". Note that we must
// use null as prior in the cleanSubtree call. Files/directories that // use NO_SNAPSHOT_ID as prior in the cleanSubtree call. Files/directories that
@ -791,8 +799,8 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
for (INode cNode : priorDiff.getChildrenDiff().getList( for (INode cNode : priorDiff.getChildrenDiff().getList(
ListType.CREATED)) { ListType.CREATED)) {
if (priorCreated.containsKey(cNode)) { if (priorCreated.containsKey(cNode)) {
counts.add(cNode.cleanSubtree(snapshot, null, collectedBlocks, counts.add(cNode.cleanSubtree(snapshot, Snapshot.NO_SNAPSHOT_ID,
removedINodes, countDiffChange)); collectedBlocks, removedINodes, countDiffChange));
} }
} }
} }
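Under the new convention the nullable Snapshot parameters become plain ints. A minimal sketch of the resulting call shapes (sf, dir, latestId, collectedBlocks and removedINodes are assumed names, not part of this change):

DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
// Delete the directory itself: "snapshot" is the current state, and prior
// is the latest snapshot id still covering it (or NO_SNAPSHOT_ID if none).
sf.cleanDirectory(dir, Snapshot.CURRENT_STATE_ID, latestId,
    collectedBlocks, removedINodes, true);
// Delete snapshot s, whose nearest earlier snapshot is p.
sf.cleanDirectory(dir, s.getId(), p.getId(),
    collectedBlocks, removedINodes, true);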


@ -38,15 +38,15 @@ public class FileDiff extends
/** The file size at snapshot creation time. */ /** The file size at snapshot creation time. */
private final long fileSize; private final long fileSize;
FileDiff(Snapshot snapshot, INodeFile file) { FileDiff(int snapshotId, INodeFile file) {
super(snapshot, null, null); super(snapshotId, null, null);
fileSize = file.computeFileSize(); fileSize = file.computeFileSize();
} }
/** Constructor used by FSImage loading */ /** Constructor used by FSImage loading */
FileDiff(Snapshot snapshot, INodeFileAttributes snapshotINode, FileDiff(int snapshotId, INodeFileAttributes snapshotINode,
FileDiff posteriorDiff, long fileSize) { FileDiff posteriorDiff, long fileSize) {
super(snapshot, snapshotINode, posteriorDiff); super(snapshotId, snapshotINode, posteriorDiff);
this.fileSize = fileSize; this.fileSize = fileSize;
} }


@ -25,8 +25,8 @@ public class FileDiffList extends
AbstractINodeDiffList<INodeFile, INodeFileAttributes, FileDiff> { AbstractINodeDiffList<INodeFile, INodeFileAttributes, FileDiff> {
@Override @Override
FileDiff createDiff(Snapshot snapshot, INodeFile file) { FileDiff createDiff(int snapshotId, INodeFile file) {
return new FileDiff(snapshot, file); return new FileDiff(snapshotId, file);
} }
@Override @Override


@ -78,22 +78,22 @@ public class FileWithSnapshotFeature implements INode.Feature {
return (isCurrentFileDeleted()? "(DELETED), ": ", ") + diffs; return (isCurrentFileDeleted()? "(DELETED), ": ", ") + diffs;
} }
public Quota.Counts cleanFile(final INodeFile file, final Snapshot snapshot, public Quota.Counts cleanFile(final INodeFile file, final int snapshotId,
Snapshot prior, final BlocksMapUpdateInfo collectedBlocks, int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
final List<INode> removedINodes, final boolean countDiffChange) final List<INode> removedINodes, final boolean countDiffChange)
throws QuotaExceededException { throws QuotaExceededException {
if (snapshot == null) { if (snapshotId == Snapshot.CURRENT_STATE_ID) {
// delete the current file while the file has snapshot feature // delete the current file while the file has snapshot feature
if (!isCurrentFileDeleted()) { if (!isCurrentFileDeleted()) {
file.recordModification(prior); file.recordModification(priorSnapshotId);
deleteCurrentFile(); deleteCurrentFile();
} }
collectBlocksAndClear(file, collectedBlocks, removedINodes); collectBlocksAndClear(file, collectedBlocks, removedINodes);
return Quota.Counts.newInstance(); return Quota.Counts.newInstance();
} else { // delete the snapshot } else { // delete the snapshot
prior = getDiffs().updatePrior(snapshot, prior); priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
return diffs.deleteSnapshotDiff(snapshot, prior, file, collectedBlocks, return diffs.deleteSnapshotDiff(snapshotId, priorSnapshotId, file,
removedINodes, countDiffChange); collectedBlocks, removedINodes, countDiffChange);
} }
} }


@ -206,6 +206,15 @@ public class INodeDirectorySnapshottable extends INodeDirectory {
return i < 0? null: snapshotsByNames.get(i); return i < 0? null: snapshotsByNames.get(i);
} }
Snapshot getSnapshotById(int sid) {
for (Snapshot s : snapshotsByNames) {
if (s.getId() == sid) {
return s;
}
}
return null;
}
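getSnapshotById resolves an id back to its Snapshot by a linear scan of snapshotsByNames, which is cheap at per-directory snapshot counts. A hedged sketch of the lookup the rewritten iterator further down relies on (variable names assumed):

Snapshot s = snapshottableDir.getSnapshotById(diff.getSnapshotId());
// null if the id no longer resolves; otherwise the snapshot root is at hand.
INodeDirectory root = s == null ? null : s.getRoot();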
/** @return {@link #snapshotsByNames} as a {@link ReadOnlyList} */ /** @return {@link #snapshotsByNames} as a {@link ReadOnlyList} */
public ReadOnlyList<Snapshot> getSnapshotList() { public ReadOnlyList<Snapshot> getSnapshotList() {
return ReadOnlyList.Util.asReadOnlyList(snapshotsByNames); return ReadOnlyList.Util.asReadOnlyList(snapshotsByNames);
@ -297,13 +306,14 @@ public class INodeDirectorySnapshottable extends INodeDirectory {
+ "snapshot with the same name \"" + Snapshot.getSnapshotName(s) + "\"."); + "snapshot with the same name \"" + Snapshot.getSnapshotName(s) + "\".");
} }
final DirectoryDiff d = getDiffs().addDiff(s, this); final DirectoryDiff d = getDiffs().addDiff(id, this);
d.snapshotINode = s.getRoot(); d.setSnapshotRoot(s.getRoot());
snapshotsByNames.add(-i - 1, s); snapshotsByNames.add(-i - 1, s);
//set modification time //set modification time
updateModificationTime(Time.now(), null); updateModificationTime(Time.now(), Snapshot.CURRENT_STATE_ID);
s.getRoot().setModificationTime(getModificationTime(), null); s.getRoot().setModificationTime(getModificationTime(),
Snapshot.CURRENT_STATE_ID);
return s; return s;
} }
@ -326,10 +336,10 @@ public class INodeDirectorySnapshottable extends INodeDirectory {
+ ": the snapshot does not exist."); + ": the snapshot does not exist.");
} else { } else {
final Snapshot snapshot = snapshotsByNames.get(i); final Snapshot snapshot = snapshotsByNames.get(i);
Snapshot prior = Snapshot.findLatestSnapshot(this, snapshot); int prior = Snapshot.findLatestSnapshot(this, snapshot.getId());
try { try {
Quota.Counts counts = cleanSubtree(snapshot, prior, collectedBlocks, Quota.Counts counts = cleanSubtree(snapshot.getId(), prior,
removedINodes, true); collectedBlocks, removedINodes, true);
INodeDirectory parent = getParent(); INodeDirectory parent = getParent();
if (parent != null) { if (parent != null) {
// there will not be any WithName node corresponding to the deleted // there will not be any WithName node corresponding to the deleted
@ -425,8 +435,9 @@ public class INodeDirectorySnapshottable extends INodeDirectory {
diffReport.addDirDiff(dir, relativePath, diff); diffReport.addDirDiff(dir, relativePath, diff);
} }
} }
ReadOnlyList<INode> children = dir.getChildrenList(diffReport ReadOnlyList<INode> children = dir.getChildrenList(
.isFromEarlier() ? diffReport.to : diffReport.from); diffReport.isFromEarlier() ? Snapshot.getSnapshotId(diffReport.to) :
Snapshot.getSnapshotId(diffReport.from));
for (INode child : children) { for (INode child : children) {
final byte[] name = child.getLocalNameBytes(); final byte[] name = child.getLocalNameBytes();
if (diff.searchIndex(ListType.CREATED, name) < 0 if (diff.searchIndex(ListType.CREATED, name) < 0
@ -454,16 +465,15 @@ public class INodeDirectorySnapshottable extends INodeDirectory {
* Replace itself with {@link INodeDirectoryWithSnapshot} or * Replace itself with {@link INodeDirectoryWithSnapshot} or
* {@link INodeDirectory} depending on the latest snapshot. * {@link INodeDirectory} depending on the latest snapshot.
*/ */
INodeDirectory replaceSelf(final Snapshot latest, final INodeMap inodeMap) INodeDirectory replaceSelf(final int latestSnapshotId, final INodeMap inodeMap)
throws QuotaExceededException { throws QuotaExceededException {
if (latest == null) { if (latestSnapshotId == Snapshot.CURRENT_STATE_ID) {
Preconditions.checkState( Preconditions.checkState(getDirectoryWithSnapshotFeature()
getDirectoryWithSnapshotFeature().getLastSnapshot() == null, .getLastSnapshotId() == Snapshot.CURRENT_STATE_ID, "this=%s", this);
"latest == null but getLastSnapshot() != null, this=%s", this);
} }
INodeDirectory dir = replaceSelf4INodeDirectory(inodeMap); INodeDirectory dir = replaceSelf4INodeDirectory(inodeMap);
if (latest != null) { if (latestSnapshotId != Snapshot.CURRENT_STATE_ID) {
dir.recordModification(latest); dir.recordModification(latestSnapshotId);
} }
return dir; return dir;
} }
@ -475,10 +485,10 @@ public class INodeDirectorySnapshottable extends INodeDirectory {
@Override @Override
public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix, public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
Snapshot snapshot) { int snapshot) {
super.dumpTreeRecursively(out, prefix, snapshot); super.dumpTreeRecursively(out, prefix, snapshot);
if (snapshot == null) { if (snapshot == Snapshot.CURRENT_STATE_ID) {
out.println(); out.println();
out.print(prefix); out.print(prefix);
@ -494,7 +504,8 @@ public class INodeDirectorySnapshottable extends INodeDirectory {
n++; n++;
} }
} }
Preconditions.checkState(n == snapshotsByNames.size()); Preconditions.checkState(n == snapshotsByNames.size(), "#n=" + n
+ ", snapshotsByNames.size()=" + snapshotsByNames.size());
out.print(", #snapshot="); out.print(", #snapshot=");
out.println(n); out.println(n);
@ -522,8 +533,9 @@ public class INodeDirectorySnapshottable extends INodeDirectory {
@Override @Override
public SnapshotAndINode next() { public SnapshotAndINode next() {
final Snapshot s = next.snapshot; final SnapshotAndINode pair = new SnapshotAndINode(next
final SnapshotAndINode pair = new SnapshotAndINode(s); .getSnapshotId(), getSnapshotById(next.getSnapshotId())
.getRoot());
next = findNext(); next = findNext();
return pair; return pair;
} }


@ -37,7 +37,11 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;
/** Snapshot of a sub-tree in the namesystem. */ /** Snapshot of a sub-tree in the namesystem. */
@InterfaceAudience.Private @InterfaceAudience.Private
public class Snapshot implements Comparable<byte[]> { public class Snapshot implements Comparable<byte[]> {
public static final int INVALID_ID = -1; /**
* This id is used to indicate the current state (vs. snapshots)
*/
public static final int CURRENT_STATE_ID = Integer.MAX_VALUE - 1;
public static final int NO_SNAPSHOT_ID = -1;
/** /**
* The pattern for generating the default snapshot name. * The pattern for generating the default snapshot name.
@ -61,14 +65,18 @@ public class Snapshot implements Comparable<byte[]> {
.toString(); .toString();
} }
/** /**
* Get the name of the given snapshot. * Get the name of the given snapshot.
* @param s The given snapshot. * @param s The given snapshot.
* @return The name of the snapshot, or an empty string if {@code s} is null * @return The name of the snapshot, or an empty string if {@code s} is null
*/ */
static String getSnapshotName(Snapshot s) { static String getSnapshotName(Snapshot s) {
return s != null ? s.getRoot().getLocalName() : ""; return s != null ? s.getRoot().getLocalName() : "";
} }
public static int getSnapshotId(Snapshot s) {
return s == null ? CURRENT_STATE_ID : s.getId();
}
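The helper turns the old null-means-current-state convention into an explicit id, so call sites can stop special-casing null:

// null used to stand for the current state; now that is an explicit id.
assert Snapshot.getSnapshotId(null) == Snapshot.CURRENT_STATE_ID;
// A real snapshot simply maps to its own id:
// Snapshot.getSnapshotId(s) == s.getId() for any non-null s.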
/** /**
* Compare snapshot with IDs, where null indicates the current status thus * Compare snapshot with IDs, where null indicates the current status thus
@ -78,9 +86,8 @@ public class Snapshot implements Comparable<byte[]> {
= new Comparator<Snapshot>() { = new Comparator<Snapshot>() {
@Override @Override
public int compare(Snapshot left, Snapshot right) { public int compare(Snapshot left, Snapshot right) {
return ID_INTEGER_COMPARATOR.compare( return ID_INTEGER_COMPARATOR.compare(Snapshot.getSnapshotId(left),
left == null? null: left.getId(), Snapshot.getSnapshotId(right));
right == null? null: right.getId());
} }
}; };
@ -92,12 +99,9 @@ public class Snapshot implements Comparable<byte[]> {
= new Comparator<Integer>() { = new Comparator<Integer>() {
@Override @Override
public int compare(Integer left, Integer right) { public int compare(Integer left, Integer right) {
// null means the current state, thus should be the largest // Snapshot.CURRENT_STATE_ID means the current state, thus should be the
if (left == null) { // largest
return right == null? 0: 1; return left - right;
} else {
return right == null? -1: left - right;
}
} }
}; };
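The plain subtraction is only safe because ids stay within [NO_SNAPSHOT_ID, CURRENT_STATE_ID]; the worst-case difference just fits in an int, which is presumably why CURRENT_STATE_ID is Integer.MAX_VALUE - 1 rather than Integer.MAX_VALUE:

// (Integer.MAX_VALUE - 1) - (-1) == Integer.MAX_VALUE: no overflow.
int worst = Snapshot.CURRENT_STATE_ID - Snapshot.NO_SNAPSHOT_ID;
// Had CURRENT_STATE_ID been Integer.MAX_VALUE, this difference would have
// wrapped negative and the comparator would order the ids incorrectly.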
@ -108,12 +112,12 @@ public class Snapshot implements Comparable<byte[]> {
* is not null). * is not null).
* *
* @param inode the given inode that the returned snapshot needs to cover * @param inode the given inode that the returned snapshot needs to cover
* @param anchor the returned snapshot should be taken before this snapshot. * @param anchor the returned snapshot should be taken before this given id.
* @return the latest snapshot covers the given inode and was taken before the * @return id of the latest snapshot that covers the given inode and was taken
* the given snapshot (if it is not null). * before the given snapshot (if it is not null).
*/ */
public static Snapshot findLatestSnapshot(INode inode, Snapshot anchor) { public static int findLatestSnapshot(INode inode, final int anchor) {
Snapshot latest = null; int latest = NO_SNAPSHOT_ID;
for(; inode != null; inode = inode.getParent()) { for(; inode != null; inode = inode.getParent()) {
if (inode.isDirectory()) { if (inode.isDirectory()) {
final INodeDirectory dir = inode.asDirectory(); final INodeDirectory dir = inode.asDirectory();
@ -139,13 +143,13 @@ public class Snapshot implements Comparable<byte[]> {
} }
@Override @Override
public ReadOnlyList<INode> getChildrenList(Snapshot snapshot) { public ReadOnlyList<INode> getChildrenList(int snapshotId) {
return getParent().getChildrenList(snapshot); return getParent().getChildrenList(snapshotId);
} }
@Override @Override
public INode getChild(byte[] name, Snapshot snapshot) { public INode getChild(byte[] name, int snapshotId) {
return getParent().getChild(name, snapshot); return getParent().getChild(name, snapshotId);
} }
@Override @Override


@ -118,7 +118,7 @@ public class SnapshotFSImageFormat {
private static FileDiff loadFileDiff(FileDiff posterior, DataInput in, private static FileDiff loadFileDiff(FileDiff posterior, DataInput in,
FSImageFormat.Loader loader) throws IOException { FSImageFormat.Loader loader) throws IOException {
// 1. Read the full path of the Snapshot root to identify the Snapshot // 1. Read the id of the Snapshot root to identify the Snapshot
final Snapshot snapshot = loader.getSnapshot(in); final Snapshot snapshot = loader.getSnapshot(in);
// 2. Load file size // 2. Load file size
@ -128,7 +128,7 @@ public class SnapshotFSImageFormat {
final INodeFileAttributes snapshotINode = in.readBoolean()? final INodeFileAttributes snapshotINode = in.readBoolean()?
loader.loadINodeFileAttributes(in): null; loader.loadINodeFileAttributes(in): null;
return new FileDiff(snapshot, snapshotINode, posterior, fileSize); return new FileDiff(snapshot.getId(), snapshotINode, posterior, fileSize);
} }
/** /**
@ -149,7 +149,8 @@ public class SnapshotFSImageFormat {
} // else go to the next SnapshotDiff } // else go to the next SnapshotDiff
} }
// use the current child // use the current child
INode currentChild = parent.getChild(createdNodeName, null); INode currentChild = parent.getChild(createdNodeName,
Snapshot.CURRENT_STATE_ID);
if (currentChild == null) { if (currentChild == null) {
throw new IOException("Cannot find an INode associated with the INode " throw new IOException("Cannot find an INode associated with the INode "
+ DFSUtil.bytes2String(createdNodeName) + DFSUtil.bytes2String(createdNodeName)
@ -295,9 +296,9 @@ public class SnapshotFSImageFormat {
// 6. Compose the SnapshotDiff // 6. Compose the SnapshotDiff
List<DirectoryDiff> diffs = parent.getDiffs().asList(); List<DirectoryDiff> diffs = parent.getDiffs().asList();
DirectoryDiff sdiff = new DirectoryDiff(snapshot, snapshotINode, DirectoryDiff sdiff = new DirectoryDiff(snapshot.getId(), snapshotINode,
diffs.isEmpty() ? null : diffs.get(0), diffs.isEmpty() ? null : diffs.get(0), childrenSize, createdList,
childrenSize, createdList, deletedList); deletedList, snapshotINode == snapshot.getRoot());
return sdiff; return sdiff;
} }


@ -114,7 +114,7 @@ public class SnapshotManager implements SnapshotStats {
s = (INodeDirectorySnapshottable)d; s = (INodeDirectorySnapshottable)d;
s.setSnapshotQuota(INodeDirectorySnapshottable.SNAPSHOT_LIMIT); s.setSnapshotQuota(INodeDirectorySnapshottable.SNAPSHOT_LIMIT);
} else { } else {
s = d.replaceSelf4INodeDirectorySnapshottable(iip.getLatestSnapshot(), s = d.replaceSelf4INodeDirectorySnapshottable(iip.getLatestSnapshotId(),
fsdir.getINodeMap()); fsdir.getINodeMap());
} }
addSnapshottable(s); addSnapshottable(s);
@ -160,7 +160,7 @@ public class SnapshotManager implements SnapshotStats {
if (s == fsdir.getRoot()) { if (s == fsdir.getRoot()) {
s.setSnapshotQuota(0); s.setSnapshotQuota(0);
} else { } else {
s.replaceSelf(iip.getLatestSnapshot(), fsdir.getINodeMap()); s.replaceSelf(iip.getLatestSnapshotId(), fsdir.getINodeMap());
} }
removeSnapshottable(s); removeSnapshottable(s);
} }
@ -324,7 +324,8 @@ public class SnapshotManager implements SnapshotStats {
SnapshottableDirectoryStatus status = new SnapshottableDirectoryStatus( SnapshottableDirectoryStatus status = new SnapshottableDirectoryStatus(
dir.getModificationTime(), dir.getAccessTime(), dir.getModificationTime(), dir.getAccessTime(),
dir.getFsPermission(), dir.getUserName(), dir.getGroupName(), dir.getFsPermission(), dir.getUserName(), dir.getGroupName(),
dir.getLocalNameBytes(), dir.getId(), dir.getChildrenNum(null), dir.getLocalNameBytes(), dir.getId(),
dir.getChildrenNum(Snapshot.CURRENT_STATE_ID),
dir.getNumSnapshots(), dir.getNumSnapshots(),
dir.getSnapshotQuota(), dir.getParent() == null ? dir.getSnapshotQuota(), dir.getParent() == null ?
DFSUtil.EMPTY_BYTES : DFSUtil.EMPTY_BYTES :


@ -21,7 +21,7 @@ package org.apache.hadoop.hdfs.server.protocol;
* Utilization report for a Datanode storage * Utilization report for a Datanode storage
*/ */
public class StorageReport { public class StorageReport {
private final String storageID; private final DatanodeStorage storage;
private final boolean failed; private final boolean failed;
private final long capacity; private final long capacity;
private final long dfsUsed; private final long dfsUsed;
@ -30,9 +30,9 @@ public class StorageReport {
public static final StorageReport[] EMPTY_ARRAY = {}; public static final StorageReport[] EMPTY_ARRAY = {};
public StorageReport(String sid, boolean failed, long capacity, long dfsUsed, public StorageReport(DatanodeStorage storage, boolean failed,
long remaining, long bpUsed) { long capacity, long dfsUsed, long remaining, long bpUsed) {
this.storageID = sid; this.storage = storage;
this.failed = failed; this.failed = failed;
this.capacity = capacity; this.capacity = capacity;
this.dfsUsed = dfsUsed; this.dfsUsed = dfsUsed;
@ -40,8 +40,8 @@ public class StorageReport {
this.blockPoolUsed = bpUsed; this.blockPoolUsed = bpUsed;
} }
public String getStorageID() { public DatanodeStorage getStorage() {
return storageID; return storage;
} }
public boolean isFailed() { public boolean isFailed() {
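A minimal sketch of building a report under the new signature, using the three-argument DatanodeStorage constructor that appears elsewhere in this change:

DatanodeStorage storage = new DatanodeStorage(
    DatanodeStorage.generateUuid(), DatanodeStorage.State.NORMAL,
    StorageType.SSD);
StorageReport report = new StorageReport(storage, false /*failed*/,
    1024L /*capacity*/, 100L /*dfsUsed*/, 924L /*remaining*/, 100L /*bpUsed*/);
// Storage type and state now travel with the report:
StorageType type = report.getStorage().getStorageType();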


@ -196,12 +196,13 @@ message HeartbeatRequestProto {
} }
message StorageReportProto { message StorageReportProto {
required string storageUuid = 1; required string storageUuid = 1 [ deprecated = true ];
optional bool failed = 2 [ default = false ]; optional bool failed = 2 [ default = false ];
optional uint64 capacity = 3 [ default = 0 ]; optional uint64 capacity = 3 [ default = 0 ];
optional uint64 dfsUsed = 4 [ default = 0 ]; optional uint64 dfsUsed = 4 [ default = 0 ];
optional uint64 remaining = 5 [ default = 0 ]; optional uint64 remaining = 5 [ default = 0 ];
optional uint64 blockPoolUsed = 6 [ default = 0 ]; optional uint64 blockPoolUsed = 6 [ default = 0 ];
optional DatanodeStorageProto storage = 7; // supersedes StorageUuid
} }
/** /**
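Keeping storageUuid as a deprecated field preserves wire compatibility: receivers prefer the new message and fall back for old senders. A hedged sketch of that conversion (accessor names follow protobuf's generated Java API; the convert overload for DatanodeStorageProto is assumed):

static DatanodeStorage convert(StorageReportProto p) {
  return p.hasStorage()
      ? convert(p.getStorage())                  // field 7, preferred
      : new DatanodeStorage(p.getStorageUuid()); // legacy senders
}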


@ -140,6 +140,7 @@ public class MiniDFSCluster {
private int nameNodeHttpPort = 0; private int nameNodeHttpPort = 0;
private final Configuration conf; private final Configuration conf;
private int numDataNodes = 1; private int numDataNodes = 1;
private StorageType storageType = StorageType.DEFAULT;
private boolean format = true; private boolean format = true;
private boolean manageNameDfsDirs = true; private boolean manageNameDfsDirs = true;
private boolean manageNameDfsSharedDirs = true; private boolean manageNameDfsSharedDirs = true;
@ -185,6 +186,14 @@ public class MiniDFSCluster {
return this; return this;
} }
/**
* Default: StorageType.DEFAULT
*/
public Builder storageType(StorageType type) {
this.storageType = type;
return this;
}
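With this builder hook a test can pin every simulated volume to one storage type; the new TestStorageReport below constructs its cluster roughly like this:

MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(1)
    .storageType(StorageType.SSD) // every instance dir gets an [SSD] prefix
    .build();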
/** /**
* Default: true * Default: true
*/ */
@ -341,6 +350,7 @@ public class MiniDFSCluster {
initMiniDFSCluster(builder.conf, initMiniDFSCluster(builder.conf,
builder.numDataNodes, builder.numDataNodes,
builder.storageType,
builder.format, builder.format,
builder.manageNameDfsDirs, builder.manageNameDfsDirs,
builder.manageNameDfsSharedDirs, builder.manageNameDfsSharedDirs,
@ -592,7 +602,7 @@ public class MiniDFSCluster {
String[] racks, String hosts[], String[] racks, String hosts[],
long[] simulatedCapacities) throws IOException { long[] simulatedCapacities) throws IOException {
this.nameNodes = new NameNodeInfo[1]; // Single namenode in the cluster this.nameNodes = new NameNodeInfo[1]; // Single namenode in the cluster
initMiniDFSCluster(conf, numDataNodes, format, initMiniDFSCluster(conf, numDataNodes, StorageType.DEFAULT, format,
manageNameDfsDirs, true, manageDataDfsDirs, manageDataDfsDirs, manageNameDfsDirs, true, manageDataDfsDirs, manageDataDfsDirs,
operation, racks, hosts, operation, racks, hosts,
simulatedCapacities, null, true, false, simulatedCapacities, null, true, false,
@ -601,7 +611,7 @@ public class MiniDFSCluster {
private void initMiniDFSCluster( private void initMiniDFSCluster(
Configuration conf, Configuration conf,
int numDataNodes, boolean format, boolean manageNameDfsDirs, int numDataNodes, StorageType storageType, boolean format, boolean manageNameDfsDirs,
boolean manageNameDfsSharedDirs, boolean enableManagedDfsDirsRedundancy, boolean manageNameDfsSharedDirs, boolean enableManagedDfsDirsRedundancy,
boolean manageDataDfsDirs, StartupOption operation, String[] racks, boolean manageDataDfsDirs, StartupOption operation, String[] racks,
String[] hosts, long[] simulatedCapacities, String clusterId, String[] hosts, long[] simulatedCapacities, String clusterId,
@ -670,7 +680,7 @@ public class MiniDFSCluster {
} }
// Start the DataNodes // Start the DataNodes
startDataNodes(conf, numDataNodes, manageDataDfsDirs, operation, racks, startDataNodes(conf, numDataNodes, storageType, manageDataDfsDirs, operation, racks,
hosts, simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, checkDataNodeHostConfig); hosts, simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, checkDataNodeHostConfig);
waitClusterUp(); waitClusterUp();
//make sure ProxyUsers uses the latest conf //make sure ProxyUsers uses the latest conf
@ -990,6 +1000,19 @@ public class MiniDFSCluster {
} }
} }
String makeDataNodeDirs(int dnIndex, StorageType storageType) throws IOException {
StringBuilder sb = new StringBuilder();
for (int j = 0; j < DIRS_PER_DATANODE; ++j) {
File dir = getInstanceStorageDir(dnIndex, j);
dir.mkdirs();
if (!dir.isDirectory()) {
throw new IOException("Mkdirs failed to create directory for DataNode " + dir);
}
sb.append((j > 0 ? "," : "") + "[" + storageType + "]" + fileAsURI(dir));
}
return sb.toString();
}
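The bracketed prefix is how dfs.datanode.data.dir encodes a storage type per directory; a sketch of the value this helper produces (paths are illustrative):

String dirs = makeDataNodeDirs(0, StorageType.SSD);
// dirs is now roughly "[SSD]file:/.../data1,[SSD]file:/.../data2"
dnConf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);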
/** /**
* Modify the config and start up additional DataNodes. The info port for * Modify the config and start up additional DataNodes. The info port for
* DataNodes is guaranteed to use a free port. * DataNodes is guaranteed to use a free port.
@ -1052,7 +1075,7 @@ public class MiniDFSCluster {
String[] racks, String[] hosts, String[] racks, String[] hosts,
long[] simulatedCapacities, long[] simulatedCapacities,
boolean setupHostsFile) throws IOException { boolean setupHostsFile) throws IOException {
startDataNodes(conf, numDataNodes, manageDfsDirs, operation, racks, hosts, startDataNodes(conf, numDataNodes, StorageType.DEFAULT, manageDfsDirs, operation, racks, hosts,
simulatedCapacities, setupHostsFile, false, false); simulatedCapacities, setupHostsFile, false, false);
} }
@ -1066,7 +1089,7 @@ public class MiniDFSCluster {
long[] simulatedCapacities, long[] simulatedCapacities,
boolean setupHostsFile, boolean setupHostsFile,
boolean checkDataNodeAddrConfig) throws IOException { boolean checkDataNodeAddrConfig) throws IOException {
startDataNodes(conf, numDataNodes, manageDfsDirs, operation, racks, hosts, startDataNodes(conf, numDataNodes, StorageType.DEFAULT, manageDfsDirs, operation, racks, hosts,
simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, false); simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, false);
} }
@ -1098,7 +1121,7 @@ public class MiniDFSCluster {
* @throws IllegalStateException if NameNode has been shutdown * @throws IllegalStateException if NameNode has been shutdown
*/ */
public synchronized void startDataNodes(Configuration conf, int numDataNodes, public synchronized void startDataNodes(Configuration conf, int numDataNodes,
boolean manageDfsDirs, StartupOption operation, StorageType storageType, boolean manageDfsDirs, StartupOption operation,
String[] racks, String[] hosts, String[] racks, String[] hosts,
long[] simulatedCapacities, long[] simulatedCapacities,
boolean setupHostsFile, boolean setupHostsFile,
@ -1154,16 +1177,7 @@ public class MiniDFSCluster {
// Set up datanode address // Set up datanode address
setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig); setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
if (manageDfsDirs) { if (manageDfsDirs) {
StringBuilder sb = new StringBuilder(); String dirs = makeDataNodeDirs(i, storageType);
for (int j = 0; j < DIRS_PER_DATANODE; ++j) {
File dir = getInstanceStorageDir(i, j);
dir.mkdirs();
if (!dir.isDirectory()) {
throw new IOException("Mkdirs failed to create directory for DataNode " + dir);
}
sb.append((j > 0 ? "," : "") + fileAsURI(dir));
}
String dirs = sb.toString();
dnConf.set(DFS_DATANODE_DATA_DIR_KEY, dirs); dnConf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
conf.set(DFS_DATANODE_DATA_DIR_KEY, dirs); conf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
} }


@ -50,7 +50,7 @@ public class MiniDFSClusterWithNodeGroup extends MiniDFSCluster {
} }
public synchronized void startDataNodes(Configuration conf, int numDataNodes, public synchronized void startDataNodes(Configuration conf, int numDataNodes,
boolean manageDfsDirs, StartupOption operation, StorageType storageType, boolean manageDfsDirs, StartupOption operation,
String[] racks, String[] nodeGroups, String[] hosts, String[] racks, String[] nodeGroups, String[] hosts,
long[] simulatedCapacities, long[] simulatedCapacities,
boolean setupHostsFile, boolean setupHostsFile,
@ -112,15 +112,7 @@ public class MiniDFSClusterWithNodeGroup extends MiniDFSCluster {
// Set up datanode address // Set up datanode address
setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig); setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
if (manageDfsDirs) { if (manageDfsDirs) {
File dir1 = getInstanceStorageDir(i, 0); String dirs = makeDataNodeDirs(i, storageType);
File dir2 = getInstanceStorageDir(i, 1);
dir1.mkdirs();
dir2.mkdirs();
if (!dir1.isDirectory() || !dir2.isDirectory()) {
throw new IOException("Mkdirs failed to create directory for DataNode "
+ i + ": " + dir1 + " or " + dir2);
}
String dirs = fileAsURI(dir1) + "," + fileAsURI(dir2);
dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs); dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs);
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs); conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs);
} }
@ -198,7 +190,7 @@ public class MiniDFSClusterWithNodeGroup extends MiniDFSCluster {
String[] racks, String[] nodeGroups, String[] hosts, String[] racks, String[] nodeGroups, String[] hosts,
long[] simulatedCapacities, long[] simulatedCapacities,
boolean setupHostsFile) throws IOException { boolean setupHostsFile) throws IOException {
startDataNodes(conf, numDataNodes, manageDfsDirs, operation, racks, nodeGroups, startDataNodes(conf, numDataNodes, StorageType.DEFAULT, manageDfsDirs, operation, racks, nodeGroups,
hosts, simulatedCapacities, setupHostsFile, false, false); hosts, simulatedCapacities, setupHostsFile, false, false);
} }
@ -213,13 +205,13 @@ public class MiniDFSClusterWithNodeGroup extends MiniDFSCluster {
// This is for initialize from parent class. // This is for initialize from parent class.
@Override @Override
public synchronized void startDataNodes(Configuration conf, int numDataNodes, public synchronized void startDataNodes(Configuration conf, int numDataNodes,
boolean manageDfsDirs, StartupOption operation, StorageType storageType, boolean manageDfsDirs, StartupOption operation,
String[] racks, String[] hosts, String[] racks, String[] hosts,
long[] simulatedCapacities, long[] simulatedCapacities,
boolean setupHostsFile, boolean setupHostsFile,
boolean checkDataNodeAddrConfig, boolean checkDataNodeAddrConfig,
boolean checkDataNodeHostConfig) throws IOException { boolean checkDataNodeHostConfig) throws IOException {
startDataNodes(conf, numDataNodes, manageDfsDirs, operation, racks, startDataNodes(conf, numDataNodes, storageType, manageDfsDirs, operation, racks,
NODE_GROUPS, hosts, simulatedCapacities, setupHostsFile, NODE_GROUPS, hosts, simulatedCapacities, setupHostsFile,
checkDataNodeAddrConfig, checkDataNodeHostConfig); checkDataNodeAddrConfig, checkDataNodeHostConfig);
} }


@ -257,8 +257,10 @@ public class BlockManagerTestUtil {
DatanodeDescriptor dnd) { DatanodeDescriptor dnd) {
ArrayList<StorageReport> reports = new ArrayList<StorageReport>(); ArrayList<StorageReport> reports = new ArrayList<StorageReport>();
for (DatanodeStorageInfo storage : dnd.getStorageInfos()) { for (DatanodeStorageInfo storage : dnd.getStorageInfos()) {
DatanodeStorage dns = new DatanodeStorage(
storage.getStorageID(), storage.getState(), storage.getStorageType());
StorageReport report = new StorageReport( StorageReport report = new StorageReport(
storage.getStorageID(), false, storage.getCapacity(), dns, false, storage.getCapacity(),
storage.getDfsUsed(), storage.getRemaining(), storage.getDfsUsed(), storage.getRemaining(),
storage.getBlockPoolUsed()); storage.getBlockPoolUsed());
reports.add(report); reports.add(report);


@ -470,11 +470,14 @@ public class TestJspHelper {
BlockManagerTestUtil.updateStorage(dnDesc1, new DatanodeStorage("dnStorage1")); BlockManagerTestUtil.updateStorage(dnDesc1, new DatanodeStorage("dnStorage1"));
BlockManagerTestUtil.updateStorage(dnDesc2, new DatanodeStorage("dnStorage2")); BlockManagerTestUtil.updateStorage(dnDesc2, new DatanodeStorage("dnStorage2"));
DatanodeStorage dns1 = new DatanodeStorage("dnStorage1");
DatanodeStorage dns2 = new DatanodeStorage("dnStorage2");
StorageReport[] report1 = new StorageReport[] { StorageReport[] report1 = new StorageReport[] {
new StorageReport("dnStorage1", false, 1024, 100, 924, 100) new StorageReport(dns1, false, 1024, 100, 924, 100)
}; };
StorageReport[] report2 = new StorageReport[] { StorageReport[] report2 = new StorageReport[] {
new StorageReport("dnStorage2", false, 2500, 200, 1848, 200) new StorageReport(dns2, false, 2500, 200, 1848, 200)
}; };
dnDesc1.updateHeartbeat(report1, 5l, 3l, 10, 2); dnDesc1.updateHeartbeat(report1, 5l, 3l, 10, 2);
dnDesc2.updateHeartbeat(report2, 10l, 2l, 20, 1); dnDesc2.updateHeartbeat(report2, 10l, 2l, 20, 1);


@ -394,8 +394,9 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
} }
synchronized StorageReport getStorageReport(String bpid) { synchronized StorageReport getStorageReport(String bpid) {
return new StorageReport(getStorageUuid(), false, getCapacity(), return new StorageReport(new DatanodeStorage(getStorageUuid()),
getUsed(), getFree(), map.get(bpid).getUsed()); false, getCapacity(), getUsed(), getFree(),
map.get(bpid).getUsed());
} }
} }


@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.DataChecksum;
import org.junit.After; import org.junit.After;
@ -186,9 +187,8 @@ public class TestDiskError {
// Check permissions on directories in 'dfs.datanode.data.dir' // Check permissions on directories in 'dfs.datanode.data.dir'
FileSystem localFS = FileSystem.getLocal(conf); FileSystem localFS = FileSystem.getLocal(conf);
for (DataNode dn : cluster.getDataNodes()) { for (DataNode dn : cluster.getDataNodes()) {
String[] dataDirs = for (FsVolumeSpi v : dn.getFSDataset().getVolumes()) {
dn.getConf().getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY); String dir = v.getBasePath();
for (String dir : dataDirs) {
Path dataDir = new Path(dir); Path dataDir = new Path(dir);
FsPermission actual = localFS.getFileStatus(dataDir).getPermission(); FsPermission actual = localFS.getFileStatus(dataDir).getPermission();
assertEquals("Permission for dir: " + dataDir + ", is " + actual + assertEquals("Permission for dir: " + dataDir + ", is " + actual +


@ -43,10 +43,13 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.LogVerificationAppender; import org.apache.hadoop.hdfs.LogVerificationAppender;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
@ -109,8 +112,9 @@ public class TestFsDatasetCache {
public void setUp() throws Exception { public void setUp() throws Exception {
assumeTrue(!Path.WINDOWS); assumeTrue(!Path.WINDOWS);
conf = new HdfsConfiguration(); conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_RETRY_INTERVAL_MS, conf.setLong(
500); DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 100);
conf.setLong(DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 500);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
CACHE_CAPACITY); CACHE_CAPACITY);
@ -328,7 +332,7 @@ public class TestFsDatasetCache {
// Create some test files that will exceed total cache capacity // Create some test files that will exceed total cache capacity
final int numFiles = 5; final int numFiles = 5;
final long fileSize = 15000; final long fileSize = CACHE_CAPACITY / (numFiles-1);
final Path[] testFiles = new Path[numFiles]; final Path[] testFiles = new Path[numFiles];
final HdfsBlockLocation[][] fileLocs = new HdfsBlockLocation[numFiles][]; final HdfsBlockLocation[][] fileLocs = new HdfsBlockLocation[numFiles][];
@ -477,4 +481,42 @@ public class TestFsDatasetCache {
setHeartbeatResponse(uncacheBlocks(locs)); setHeartbeatResponse(uncacheBlocks(locs));
verifyExpectedCacheUsage(0, 0); verifyExpectedCacheUsage(0, 0);
} }
@Test(timeout=60000)
public void testUncacheQuiesces() throws Exception {
// Create a file
Path fileName = new Path("/testUncacheQuiesces");
int fileLen = 4096;
DFSTestUtil.createFile(fs, fileName, fileLen, (short)1, 0xFDFD);
// Cache it
DistributedFileSystem dfs = cluster.getFileSystem();
dfs.addCachePool(new CachePoolInfo("pool"));
dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
.setPool("pool").setPath(fileName).setReplication((short)3).build());
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
MetricsRecordBuilder dnMetrics = getMetrics(dn.getMetrics().name());
long blocksCached =
MetricsAsserts.getLongCounter("BlocksCached", dnMetrics);
return blocksCached > 0;
}
}, 1000, 30000);
// Uncache it
dfs.removeCacheDirective(1);
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
MetricsRecordBuilder dnMetrics = getMetrics(dn.getMetrics().name());
long blocksUncached =
MetricsAsserts.getLongCounter("BlocksUncached", dnMetrics);
return blocksUncached > 0;
}
}, 1000, 30000);
// Make sure that no additional messages were sent
Thread.sleep(10000);
MetricsRecordBuilder dnMetrics = getMetrics(dn.getMetrics().name());
MetricsAsserts.assertCounter("BlocksCached", 1l, dnMetrics);
MetricsAsserts.assertCounter("BlocksUncached", 1l, dnMetrics);
}
} }


@ -0,0 +1,113 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.*;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertThat;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyInt;
import static org.mockito.Matchers.anyLong;
public class TestStorageReport {
public static final Log LOG = LogFactory.getLog(TestStorageReport.class);
private static short REPL_FACTOR = 1;
private static final StorageType storageType = StorageType.SSD; // pick non-default.
private static Configuration conf;
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
static String bpid;
@Before
public void startUpCluster() throws IOException {
conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(REPL_FACTOR)
.storageType(storageType)
.build();
fs = cluster.getFileSystem();
bpid = cluster.getNamesystem().getBlockPoolId();
}
@After
public void shutDownCluster() throws IOException {
if (cluster != null) {
fs.close();
cluster.shutdown();
cluster = null;
}
}
/**
* Ensure that storage type and storage state are propagated
* in Storage Reports.
*/
@Test
public void testStorageReportHasStorageTypeAndState() throws IOException {
// Make sure we are not testing with the default type, that would not
// be a very good test.
assertNotSame(storageType, StorageType.DEFAULT);
NameNode nn = cluster.getNameNode();
DataNode dn = cluster.getDataNodes().get(0);
// Insert a spy object for the NN RPC.
DatanodeProtocolClientSideTranslatorPB nnSpy =
DataNodeTestUtils.spyOnBposToNN(dn, nn);
// Trigger a heartbeat so there is an interaction with the spy
// object.
DataNodeTestUtils.triggerHeartbeat(dn);
// Verify that the callback passed in the expected parameters.
ArgumentCaptor<StorageReport[]> captor =
ArgumentCaptor.forClass(StorageReport[].class);
Mockito.verify(nnSpy).sendHeartbeat(
any(DatanodeRegistration.class),
captor.capture(),
anyLong(), anyLong(), anyInt(), anyInt(), anyInt());
StorageReport[] reports = captor.getValue();
for (StorageReport report: reports) {
assertThat(report.getStorage().getStorageType(), is(storageType));
assertThat(report.getStorage().getState(), is(DatanodeStorage.State.NORMAL));
}
}
}


@ -938,7 +938,7 @@ public class NNThroughputBenchmark implements Tool {
// register datanode // register datanode
dnRegistration = nameNodeProto.registerDatanode(dnRegistration); dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
//first block reports //first block reports
storage = new DatanodeStorage(dnRegistration.getDatanodeUuid()); storage = new DatanodeStorage(DatanodeStorage.generateUuid());
final StorageBlockReport[] reports = { final StorageBlockReport[] reports = {
new StorageBlockReport(storage, new StorageBlockReport(storage,
new BlockListAsLongs(null, null).getBlockListAsLongs()) new BlockListAsLongs(null, null).getBlockListAsLongs())
@ -954,8 +954,8 @@ public class NNThroughputBenchmark implements Tool {
void sendHeartbeat() throws IOException { void sendHeartbeat() throws IOException {
// register datanode // register datanode
// TODO:FEDERATION currently a single block pool is supported // TODO:FEDERATION currently a single block pool is supported
StorageReport[] rep = { new StorageReport(dnRegistration.getDatanodeUuid(), StorageReport[] rep = { new StorageReport(storage, false,
false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) }; DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration, rep, DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration, rep,
0L, 0L, 0, 0, 0).getCommands(); 0L, 0L, 0, 0, 0).getCommands();
if(cmds != null) { if(cmds != null) {
@ -1001,7 +1001,7 @@ public class NNThroughputBenchmark implements Tool {
@SuppressWarnings("unused") // keep it for future blockReceived benchmark @SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException { int replicateBlocks() throws IOException {
// register datanode // register datanode
StorageReport[] rep = { new StorageReport(dnRegistration.getDatanodeUuid(), StorageReport[] rep = { new StorageReport(storage,
false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) }; false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration, DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
rep, 0L, 0L, 0, 0, 0).getCommands(); rep, 0L, 0L, 0, 0, 0).getCommands();
@ -1010,7 +1010,8 @@ public class NNThroughputBenchmark implements Tool {
if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) { if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
// Send a copy of a block to another datanode // Send a copy of a block to another datanode
BlockCommand bcmd = (BlockCommand)cmd; BlockCommand bcmd = (BlockCommand)cmd;
return transferBlocks(bcmd.getBlocks(), bcmd.getTargets()); return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
bcmd.getTargetStorageIDs());
} }
} }
} }
@ -1023,12 +1024,14 @@ public class NNThroughputBenchmark implements Tool {
* that the blocks have been received. * that the blocks have been received.
*/ */
private int transferBlocks( Block blocks[], private int transferBlocks( Block blocks[],
DatanodeInfo xferTargets[][] DatanodeInfo xferTargets[][],
String targetStorageIDs[][]
) throws IOException { ) throws IOException {
for(int i = 0; i < blocks.length; i++) { for(int i = 0; i < blocks.length; i++) {
DatanodeInfo blockTargets[] = xferTargets[i]; DatanodeInfo blockTargets[] = xferTargets[i];
for(int t = 0; t < blockTargets.length; t++) { for(int t = 0; t < blockTargets.length; t++) {
DatanodeInfo dnInfo = blockTargets[t]; DatanodeInfo dnInfo = blockTargets[t];
String targetStorageID = targetStorageIDs[i][t];
DatanodeRegistration receivedDNReg; DatanodeRegistration receivedDNReg;
receivedDNReg = new DatanodeRegistration(dnInfo, receivedDNReg = new DatanodeRegistration(dnInfo,
new DataStorage(nsInfo), new DataStorage(nsInfo),
@ -1038,7 +1041,7 @@ public class NNThroughputBenchmark implements Tool {
blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
null) }; null) };
StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks( StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
receivedDNReg.getDatanodeUuid(), rdBlocks) }; targetStorageID, rdBlocks) };
nameNodeProto.blockReceivedAndDeleted(receivedDNReg, nameNode nameNodeProto.blockReceivedAndDeleted(receivedDNReg, nameNode
.getNamesystem().getBlockPoolId(), report); .getNamesystem().getBlockPoolId(), report);
} }
@ -1127,7 +1130,7 @@ public class NNThroughputBenchmark implements Tool {
} }
// create files // create files
LOG.info("Creating " + nrFiles + " with " + blocksPerFile + " blocks each."); LOG.info("Creating " + nrFiles + " files with " + blocksPerFile + " blocks each.");
FileNameGenerator nameGenerator; FileNameGenerator nameGenerator;
nameGenerator = new FileNameGenerator(getBaseDir(), 100); nameGenerator = new FileNameGenerator(getBaseDir(), 100);
String clientName = getClientName(007); String clientName = getClientName(007);
@ -1161,7 +1164,7 @@ public class NNThroughputBenchmark implements Tool {
loc.getBlock().getLocalBlock(), loc.getBlock().getLocalBlock(),
ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null) }; ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null) };
StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks( StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
datanodes[dnIdx].dnRegistration.getDatanodeUuid(), rdBlocks) }; datanodes[dnIdx].storage.getStorageID(), rdBlocks) };
nameNodeProto.blockReceivedAndDeleted(datanodes[dnIdx].dnRegistration, loc nameNodeProto.blockReceivedAndDeleted(datanodes[dnIdx].dnRegistration, loc
.getBlock().getBlockPoolId(), report); .getBlock().getBlockPoolId(), report);
} }
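With heterogeneous storage, incremental block reports are keyed by the storage that holds the replica rather than by the datanode UUID. A minimal sketch of the shape now expected (block, storage, dnRegistration and bpid are assumed to be in scope):

ReceivedDeletedBlockInfo rd = new ReceivedDeletedBlockInfo(
    block, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null);
StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
    storage.getStorageID(), new ReceivedDeletedBlockInfo[] { rd }) };
nameNodeProto.blockReceivedAndDeleted(dnRegistration, bpid, report);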


@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.File; import java.io.File;
import java.io.IOException; import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.Iterator; import java.util.Iterator;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
@ -29,25 +28,13 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSClientAdapter;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Util; import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
/** /**
* OfflineEditsViewerHelper is a helper class for TestOfflineEditsViewer, * OfflineEditsViewerHelper is a helper class for TestOfflineEditsViewer,
@ -135,151 +122,11 @@ public class OfflineEditsViewerHelper {
* OP_CLEAR_NS_QUOTA (12) * OP_CLEAR_NS_QUOTA (12)
*/ */
private CheckpointSignature runOperations() throws IOException { private CheckpointSignature runOperations() throws IOException {
LOG.info("Creating edits by performing fs operations"); LOG.info("Creating edits by performing fs operations");
// no check, if it's not it throws an exception which is what we want // no check, if it's not it throws an exception which is what we want
DistributedFileSystem dfs = DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
(DistributedFileSystem)cluster.getFileSystem(); DFSTestUtil.runOperations(cluster, dfs, cluster.getConfiguration(0),
FileContext fc = FileContext.getFileContext(cluster.getURI(0), config); dfs.getDefaultBlockSize(), 0);
// OP_ADD 0
Path pathFileCreate = new Path("/file_create_u\1F431");
FSDataOutputStream s = dfs.create(pathFileCreate);
// OP_CLOSE 9
s.close();
// OP_RENAME_OLD 1
Path pathFileMoved = new Path("/file_moved");
dfs.rename(pathFileCreate, pathFileMoved);
// OP_DELETE 2
dfs.delete(pathFileMoved, false);
// OP_MKDIR 3
Path pathDirectoryMkdir = new Path("/directory_mkdir");
dfs.mkdirs(pathDirectoryMkdir);
// OP_ALLOW_SNAPSHOT 29
dfs.allowSnapshot(pathDirectoryMkdir);
// OP_DISALLOW_SNAPSHOT 30
dfs.disallowSnapshot(pathDirectoryMkdir);
// OP_CREATE_SNAPSHOT 26
String ssName = "snapshot1";
dfs.allowSnapshot(pathDirectoryMkdir);
dfs.createSnapshot(pathDirectoryMkdir, ssName);
// OP_RENAME_SNAPSHOT 28
String ssNewName = "snapshot2";
dfs.renameSnapshot(pathDirectoryMkdir, ssName, ssNewName);
// OP_DELETE_SNAPSHOT 27
dfs.deleteSnapshot(pathDirectoryMkdir, ssNewName);
// OP_SET_REPLICATION 4
s = dfs.create(pathFileCreate);
s.close();
dfs.setReplication(pathFileCreate, (short)1);
// OP_SET_PERMISSIONS 7
Short permission = 0777;
dfs.setPermission(pathFileCreate, new FsPermission(permission));
// OP_SET_OWNER 8
dfs.setOwner(pathFileCreate, new String("newOwner"), null);
// OP_CLOSE 9 see above
// OP_SET_GENSTAMP 10 see above
// OP_SET_NS_QUOTA 11 obsolete
// OP_CLEAR_NS_QUOTA 12 obsolete
// OP_TIMES 13
long mtime = 1285195527000L; // Wed, 22 Sep 2010 22:45:27 GMT
long atime = mtime;
dfs.setTimes(pathFileCreate, mtime, atime);
// OP_SET_QUOTA 14
dfs.setQuota(pathDirectoryMkdir, 1000L, HdfsConstants.QUOTA_DONT_SET);
// OP_RENAME 15
fc.rename(pathFileCreate, pathFileMoved, Rename.NONE);
// OP_CONCAT_DELETE 16
Path pathConcatTarget = new Path("/file_concat_target");
Path[] pathConcatFiles = new Path[2];
pathConcatFiles[0] = new Path("/file_concat_0");
pathConcatFiles[1] = new Path("/file_concat_1");
long length = blockSize * 3; // multiple of blocksize for concat
short replication = 1;
long seed = 1;
DFSTestUtil.createFile(dfs, pathConcatTarget, length, replication, seed);
DFSTestUtil.createFile(dfs, pathConcatFiles[0], length, replication, seed);
DFSTestUtil.createFile(dfs, pathConcatFiles[1], length, replication, seed);
dfs.concat(pathConcatTarget, pathConcatFiles);
// OP_SYMLINK 17
Path pathSymlink = new Path("/file_symlink");
fc.createSymlink(pathConcatTarget, pathSymlink, false);
// OP_GET_DELEGATION_TOKEN 18
// OP_RENEW_DELEGATION_TOKEN 19
// OP_CANCEL_DELEGATION_TOKEN 20
// see TestDelegationToken.java
// fake the user to renew token for
final Token<?>[] tokens = dfs.addDelegationTokens("JobTracker", null);
UserGroupInformation longUgi = UserGroupInformation.createRemoteUser(
"JobTracker/foo.com@FOO.COM");
try {
longUgi.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws IOException, InterruptedException {
for (Token<?> token : tokens) {
token.renew(config);
token.cancel(config);
}
return null;
}
});
} catch(InterruptedException e) {
throw new IOException(
"renewDelegationToken threw InterruptedException", e);
}
// OP_UPDATE_MASTER_KEY 21
// done by getDelegationTokenSecretManager().startThreads();
// OP_ADD_CACHE_POOL 35
final String pool = "poolparty";
dfs.addCachePool(new CachePoolInfo(pool));
// OP_MODIFY_CACHE_POOL 36
dfs.modifyCachePool(new CachePoolInfo(pool)
.setOwnerName("carlton")
.setGroupName("party")
.setMode(new FsPermission((short)0700))
.setLimit(1989l));
// OP_ADD_PATH_BASED_CACHE_DIRECTIVE 33
long id = dfs.addCacheDirective(
new CacheDirectiveInfo.Builder().
setPath(new Path("/bar")).
setReplication((short)1).
setPool(pool).
build());
// OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE 38
dfs.modifyCacheDirective(
new CacheDirectiveInfo.Builder().
setId(id).
setPath(new Path("/bar2")).
build());
// OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE 34
dfs.removeCacheDirective(id);
// OP_REMOVE_CACHE_POOL 37
dfs.removeCachePool(pool);
// sync to disk, otherwise we parse partial edits
cluster.getNameNode().getFSImage().getEditLog().logSync();
// OP_REASSIGN_LEASE 22
String filePath = "/hard-lease-recovery-test";
byte[] bytes = "foo-bar-baz".getBytes();
DFSClientAdapter.stopLeaseRenewer(dfs);
FSDataOutputStream leaseRecoveryPath = dfs.create(new Path(filePath));
leaseRecoveryPath.write(bytes);
leaseRecoveryPath.hflush();
// Set the hard lease timeout to 1 second.
cluster.setLeasePeriod(60 * 1000, 1000);
// wait for lease recovery to complete
LocatedBlocks locatedBlocks;
do {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
LOG.info("Innocuous exception", e);
}
locatedBlocks = DFSClientAdapter.callGetBlockLocations(
cluster.getNameNodeRpc(), filePath, 0L, bytes.length);
} while (locatedBlocks.isUnderConstruction());
// Force a roll so we get an OP_END_LOG_SEGMENT txn // Force a roll so we get an OP_END_LOG_SEGMENT txn
return cluster.getNameNodeRpc().rollEditLog(); return cluster.getNameNodeRpc().rollEditLog();


@@ -57,17 +57,18 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.LogVerificationAppender;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveIterator;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
 import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.CachePoolStats;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -81,6 +82,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.GSet;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -603,8 +605,8 @@ public class TestCacheDirectives {
    * Wait for the NameNode to have an expected number of cached blocks
    * and replicas.
    * @param nn NameNode
-   * @param expectedCachedBlocks
-   * @param expectedCachedReplicas
+   * @param expectedCachedBlocks if -1, treat as wildcard
+   * @param expectedCachedReplicas if -1, treat as wildcard
    * @throws Exception
    */
  private static void waitForCachedBlocks(NameNode nn,
@@ -633,16 +635,18 @@ public class TestCacheDirectives {
         } finally {
           namesystem.readUnlock();
         }
-        if ((numCachedBlocks == expectedCachedBlocks) &&
-            (numCachedReplicas == expectedCachedReplicas)) {
-          return true;
-        } else {
-          LOG.info(logString + " cached blocks: have " + numCachedBlocks +
-              " / " + expectedCachedBlocks + ". " +
-              "cached replicas: have " + numCachedReplicas +
-              " / " + expectedCachedReplicas);
-          return false;
+        if (expectedCachedBlocks == -1 ||
+            numCachedBlocks == expectedCachedBlocks) {
+          if (expectedCachedReplicas == -1 ||
+              numCachedReplicas == expectedCachedReplicas) {
+            return true;
+          }
         }
+        LOG.info(logString + " cached blocks: have " + numCachedBlocks +
+            " / " + expectedCachedBlocks + ". " +
+            "cached replicas: have " + numCachedReplicas +
+            " / " + expectedCachedReplicas);
+        return false;
       }
     }, 500, 60000);
   }
@@ -1351,4 +1355,39 @@ public class TestCacheDirectives {
         .setExpiration(Expiration.newRelative(RELATIVE_EXPIRY_NEVER - 1))
         .build());
   }
+
+  @Test(timeout=60000)
+  public void testExceedsCapacity() throws Exception {
+    // Create a giant file
+    final Path fileName = new Path("/exceeds");
+    final long fileLen = CACHE_CAPACITY * (NUM_DATANODES*2);
+    int numCachedReplicas = (int) ((CACHE_CAPACITY*NUM_DATANODES)/BLOCK_SIZE);
+    DFSTestUtil.createFile(dfs, fileName, fileLen, (short) NUM_DATANODES,
+        0xFADED);
+    // Set up a log appender watcher
+    final LogVerificationAppender appender = new LogVerificationAppender();
+    final Logger logger = Logger.getRootLogger();
+    logger.addAppender(appender);
+    dfs.addCachePool(new CachePoolInfo("pool"));
+    dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool")
+        .setPath(fileName).setReplication((short) 1).build());
+    waitForCachedBlocks(namenode, -1, numCachedReplicas,
+        "testExceeds:1");
+    // Check that no DNs saw an excess CACHE message
+    int lines = appender.countLinesWithMessage(
+        "more bytes in the cache: " +
+        DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
+    assertEquals("Namenode should not send extra CACHE commands", 0, lines);
+    // Try creating a file with giant-sized blocks that exceed cache capacity
+    dfs.delete(fileName, false);
+    DFSTestUtil.createFile(dfs, fileName, 4096, fileLen, CACHE_CAPACITY * 2,
+        (short) 1, 0xFADED);
+    // Nothing will get cached, so just force sleep for a bit
+    Thread.sleep(4000);
+    // Still should not see any excess commands
+    lines = appender.countLinesWithMessage(
+        "more bytes in the cache: " +
+        DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
+    assertEquals("Namenode should not send extra CACHE commands", 0, lines);
+  }
 }

View File

@@ -140,8 +140,9 @@ public class TestDeadDatanode {
     // Ensure heartbeat from dead datanode is rejected with a command
     // that asks datanode to register again
-    StorageReport[] rep = { new StorageReport(reg.getDatanodeUuid(), false, 0, 0,
-        0, 0) };
+    StorageReport[] rep = { new StorageReport(
+        new DatanodeStorage(reg.getDatanodeUuid()),
+        false, 0, 0, 0, 0) };
     DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0)
         .getCommands();
     assertEquals(1, cmd.length);

View File

@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -119,7 +120,7 @@ public class TestFSDirectory {
     fsdir.reset();
     Assert.assertFalse(fsdir.isReady());
     final INodeDirectory root = (INodeDirectory) fsdir.getINode("/");
-    Assert.assertTrue(root.getChildrenList(null).isEmpty());
+    Assert.assertTrue(root.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
     fsdir.imageLoadComplete();
     Assert.assertTrue(fsdir.isReady());
   }

View File

@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
@@ -43,6 +44,7 @@ import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.hdfs.util.Canceler;
 import org.apache.log4j.Level;
@@ -195,11 +197,12 @@ public class TestFSImageWithSnapshot {
     INodeDirectorySnapshottable rootNode =
         (INodeDirectorySnapshottable) fsn.dir.getINode4Write(root.toString());
     assertTrue("The children list of root should be empty",
-        rootNode.getChildrenList(null).isEmpty());
+        rootNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
     // one snapshot on root: s1
     List<DirectoryDiff> diffList = rootNode.getDiffs().asList();
     assertEquals(1, diffList.size());
-    assertEquals("s1", diffList.get(0).getSnapshot().getRoot().getLocalName());
+    Snapshot s1 = rootNode.getSnapshot(DFSUtil.string2Bytes("s1"));
+    assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
     // check SnapshotManager's snapshottable directory list
     assertEquals(1, fsn.getSnapshotManager().getNumSnapshottableDirs());

View File

@@ -60,6 +60,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Time;
@@ -781,7 +782,7 @@ public class TestINodeFile {
       }
       System.out.println("Adding component " + DFSUtil.bytes2String(component));
       dir = new INodeDirectory(++id, component, permstatus, 0);
-      prev.addChild(dir, false, null);
+      prev.addChild(dir, false, Snapshot.CURRENT_STATE_ID);
       prev = dir;
     }
     return dir; // Last Inode in the chain

View File

@@ -123,8 +123,12 @@ public class TestSnapshotPathINodes {
       final Snapshot snapshot, int index) {
     assertEquals(isSnapshot, inodesInPath.isSnapshot());
     assertEquals(index, inodesInPath.getSnapshotRootIndex());
-    assertEquals(isSnapshot? snapshot: null, inodesInPath.getPathSnapshot());
-    assertEquals(isSnapshot? null: snapshot, inodesInPath.getLatestSnapshot());
+    assertEquals(Snapshot.getSnapshotId(isSnapshot ? snapshot : null),
+        inodesInPath.getPathSnapshotId());
+    if (!isSnapshot) {
+      assertEquals(Snapshot.getSnapshotId(snapshot),
+          inodesInPath.getLatestSnapshotId());
+    }
     if (isSnapshot && index >= 0) {
       assertEquals(Snapshot.Root.class, inodesInPath.getINodes()[index].getClass());
     }
@@ -424,7 +428,7 @@ public class TestSnapshotPathINodes {
     // The modification time of the snapshot INode should be the same with the
     // original INode before modification
     assertEquals(modTime,
-        snapshotFileNode.getModificationTime(ssNodesInPath.getPathSnapshot()));
+        snapshotFileNode.getModificationTime(ssNodesInPath.getPathSnapshotId()));
     // Check the INode for /TestSnapshot/sub1/file1 again
     names = INode.getPathNames(file1.toString());

View File

@@ -47,6 +47,8 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.test.MetricsAsserts;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
@@ -108,6 +110,12 @@ public class TestNameNodeMetrics {
   @After
   public void tearDown() throws Exception {
+    MetricsSource source = DefaultMetricsSystem.instance().getSource("UgiMetrics");
+    if (source != null) {
+      // Run only once since the UGI metrics is cleaned up during teardown
+      MetricsRecordBuilder rb = getMetrics(source);
+      assertQuantileGauges("GetGroups1s", rb);
+    }
     cluster.shutdown();
   }

View File

@@ -270,7 +270,8 @@ public class SnapshotTestHelper {
   public static void dumpTree2File(FSDirectory fsdir, File f) throws IOException{
     final PrintWriter out = new PrintWriter(new FileWriter(f, false), true);
-    fsdir.getINode("/").dumpTreeRecursively(out, new StringBuilder(), null);
+    fsdir.getINode("/").dumpTreeRecursively(out, new StringBuilder(),
+        Snapshot.CURRENT_STATE_ID);
     out.close();
   }

View File

@@ -156,7 +156,6 @@ public class TestINodeFileUnderConstructionWithSnapshot {
     INodeDirectorySnapshottable dirNode = (INodeDirectorySnapshottable) fsdir
         .getINode(dir.toString());
     DirectoryDiff last = dirNode.getDiffs().getLast();
-    Snapshot s0 = last.snapshot;
 
     // 2. append without closing stream
     out = appendFileWithoutClosing(file, BLOCKSIZE);
@@ -164,7 +163,7 @@ public class TestINodeFileUnderConstructionWithSnapshot {
     // re-check nodeInDeleted_S0
     dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
-    assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize(s0));
+    assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize(last.getSnapshotId()));
 
     // 3. take snapshot --> close stream
     hdfs.createSnapshot(dir, "s1");
@@ -175,9 +174,8 @@ public class TestINodeFileUnderConstructionWithSnapshot {
     fileNode = (INodeFile) fsdir.getINode(file.toString());
     dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
     last = dirNode.getDiffs().getLast();
-    Snapshot s1 = last.snapshot;
     assertTrue(fileNode.isWithSnapshot());
-    assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(s1));
+    assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(last.getSnapshotId()));
 
     // 4. modify file --> append without closing stream --> take snapshot -->
     // close stream
@@ -187,7 +185,7 @@ public class TestINodeFileUnderConstructionWithSnapshot {
     out.close();
 
     // re-check the size of nodeInDeleted_S1
-    assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(s1));
+    assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(last.getSnapshotId()));
   }
 
   /**

View File

@@ -44,6 +44,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
@@ -72,6 +73,7 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
+;
 
 /** Testing rename with snapshots. */
 public class TestRenameWithSnapshots {
@@ -402,9 +404,11 @@ public class TestRenameWithSnapshots {
     final Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3",
         "foo");
     assertFalse(hdfs.exists(foo_s3));
+    INodeDirectorySnapshottable sdir2Node =
+        (INodeDirectorySnapshottable) fsdir.getINode(sdir2.toString());
+    Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
     INodeFile sfoo = fsdir.getINode(newfoo.toString()).asFile();
-    assertEquals("s2", sfoo.getDiffs().getLastSnapshot().getRoot()
-        .getLocalName());
+    assertEquals(s2.getId(), sfoo.getDiffs().getLastSnapshotId());
   }
 
   /**
@@ -604,8 +608,10 @@ public class TestRenameWithSnapshots {
     INodeFile snode = fsdir.getINode(newfoo.toString()).asFile();
     assertEquals(1, snode.getDiffs().asList().size());
-    assertEquals("s2", snode.getDiffs().getLastSnapshot().getRoot()
-        .getLocalName());
+    INodeDirectorySnapshottable sdir2Node =
+        (INodeDirectorySnapshottable) fsdir.getINode(sdir2.toString());
+    Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
+    assertEquals(s2.getId(), snode.getDiffs().getLastSnapshotId());
 
     // restart cluster
     restartClusterAndCheckImage(true);
@@ -758,12 +764,14 @@ public class TestRenameWithSnapshots {
     assertEquals(2, fooWithCount.getReferenceCount());
     INodeDirectory foo = fooWithCount.asDirectory();
     assertEquals(1, foo.getDiffs().asList().size());
-    assertEquals("s1", foo.getDirectoryWithSnapshotFeature().getLastSnapshot()
-        .getRoot().getLocalName());
+    INodeDirectorySnapshottable sdir1Node =
+        (INodeDirectorySnapshottable) fsdir.getINode(sdir1.toString());
+    Snapshot s1 = sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
+    assertEquals(s1.getId(), foo.getDirectoryWithSnapshotFeature()
+        .getLastSnapshotId());
     INodeFile bar1 = fsdir.getINode4Write(bar1_dir1.toString()).asFile();
     assertEquals(1, bar1.getDiffs().asList().size());
-    assertEquals("s1", bar1.getDiffs().getLastSnapshot().getRoot()
-        .getLocalName());
+    assertEquals(s1.getId(), bar1.getDiffs().getLastSnapshotId());
 
     INodeReference barRef = fsdir.getINode4Write(bar2_dir1.toString())
         .asReference();
@@ -772,8 +780,7 @@ public class TestRenameWithSnapshots {
     assertEquals(2, barWithCount.getReferenceCount());
     INodeFile bar = barWithCount.asFile();
     assertEquals(1, bar.getDiffs().asList().size());
-    assertEquals("s1", bar.getDiffs().getLastSnapshot().getRoot()
-        .getLocalName());
+    assertEquals(s1.getId(), bar.getDiffs().getLastSnapshotId());
 
     // restart the cluster and check fsimage
     restartClusterAndCheckImage(true);
@@ -967,6 +974,13 @@ public class TestRenameWithSnapshots {
     hdfs.rename(bar_dir2, bar_dir1);
 
     // check the internal details
+    INodeDirectorySnapshottable sdir1Node =
+        (INodeDirectorySnapshottable) fsdir.getINode(sdir1.toString());
+    INodeDirectorySnapshottable sdir2Node =
+        (INodeDirectorySnapshottable) fsdir.getINode(sdir2.toString());
+    INodeDirectorySnapshottable sdir3Node =
+        (INodeDirectorySnapshottable) fsdir.getINode(sdir3.toString());
+
     INodeReference fooRef = fsdir.getINode4Write(foo_dir1.toString())
         .asReference();
     INodeReference.WithCount fooWithCount = (WithCount) fooRef.getReferredINode();
@@ -975,16 +989,22 @@ public class TestRenameWithSnapshots {
     INodeDirectory foo = fooWithCount.asDirectory();
     List<DirectoryDiff> fooDiffs = foo.getDiffs().asList();
     assertEquals(4, fooDiffs.size());
-    assertEquals("s2222", fooDiffs.get(3).snapshot.getRoot().getLocalName());
-    assertEquals("s333", fooDiffs.get(2).snapshot.getRoot().getLocalName());
-    assertEquals("s22", fooDiffs.get(1).snapshot.getRoot().getLocalName());
-    assertEquals("s1", fooDiffs.get(0).snapshot.getRoot().getLocalName());
+
+    Snapshot s2222 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2222"));
+    Snapshot s333 = sdir3Node.getSnapshot(DFSUtil.string2Bytes("s333"));
+    Snapshot s22 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s22"));
+    Snapshot s1 = sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
+
+    assertEquals(s2222.getId(), fooDiffs.get(3).getSnapshotId());
+    assertEquals(s333.getId(), fooDiffs.get(2).getSnapshotId());
+    assertEquals(s22.getId(), fooDiffs.get(1).getSnapshotId());
+    assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
     INodeFile bar1 = fsdir.getINode4Write(bar1_dir1.toString()).asFile();
     List<FileDiff> bar1Diffs = bar1.getDiffs().asList();
     assertEquals(3, bar1Diffs.size());
-    assertEquals("s333", bar1Diffs.get(2).snapshot.getRoot().getLocalName());
-    assertEquals("s22", bar1Diffs.get(1).snapshot.getRoot().getLocalName());
-    assertEquals("s1", bar1Diffs.get(0).snapshot.getRoot().getLocalName());
+    assertEquals(s333.getId(), bar1Diffs.get(2).getSnapshotId());
+    assertEquals(s22.getId(), bar1Diffs.get(1).getSnapshotId());
+    assertEquals(s1.getId(), bar1Diffs.get(0).getSnapshotId());
 
     INodeReference barRef = fsdir.getINode4Write(bar_dir1.toString())
         .asReference();
@@ -994,10 +1014,10 @@ public class TestRenameWithSnapshots {
     INodeFile bar = barWithCount.asFile();
     List<FileDiff> barDiffs = bar.getDiffs().asList();
     assertEquals(4, barDiffs.size());
-    assertEquals("s2222", barDiffs.get(3).snapshot.getRoot().getLocalName());
-    assertEquals("s333", barDiffs.get(2).snapshot.getRoot().getLocalName());
-    assertEquals("s22", barDiffs.get(1).snapshot.getRoot().getLocalName());
-    assertEquals("s1", barDiffs.get(0).snapshot.getRoot().getLocalName());
+    assertEquals(s2222.getId(), barDiffs.get(3).getSnapshotId());
+    assertEquals(s333.getId(), barDiffs.get(2).getSnapshotId());
+    assertEquals(s22.getId(), barDiffs.get(1).getSnapshotId());
+    assertEquals(s1.getId(), barDiffs.get(0).getSnapshotId());
 
     // restart the cluster and check fsimage
     restartClusterAndCheckImage(true);
@@ -1033,10 +1053,10 @@ public class TestRenameWithSnapshots {
     foo = fooWithCount.asDirectory();
     fooDiffs = foo.getDiffs().asList();
     assertEquals(4, fooDiffs.size());
-    assertEquals("s2222", fooDiffs.get(3).snapshot.getRoot().getLocalName());
+    assertEquals(s2222.getId(), fooDiffs.get(3).getSnapshotId());
     bar1Diffs = bar1.getDiffs().asList();
     assertEquals(3, bar1Diffs.size());
-    assertEquals("s333", bar1Diffs.get(2).snapshot.getRoot().getLocalName());
+    assertEquals(s333.getId(), bar1Diffs.get(2).getSnapshotId());
 
     barRef = fsdir.getINode(bar_s2222.toString()).asReference();
     barWithCount = (WithCount) barRef.getReferredINode();
@@ -1044,7 +1064,7 @@ public class TestRenameWithSnapshots {
     bar = barWithCount.asFile();
     barDiffs = bar.getDiffs().asList();
     assertEquals(4, barDiffs.size());
-    assertEquals("s2222", barDiffs.get(3).snapshot.getRoot().getLocalName());
+    assertEquals(s2222.getId(), barDiffs.get(3).getSnapshotId());
   }
 
   /**
@@ -1164,6 +1184,9 @@ public class TestRenameWithSnapshots {
     assertTrue(hdfs.exists(bar_s2));
 
     // check internal details
+    INodeDirectorySnapshottable sdir2Node =
+        (INodeDirectorySnapshottable) fsdir.getINode(sdir2.toString());
+    Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
     final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo");
     INodeReference fooRef = fsdir.getINode(foo_s2.toString()).asReference();
     assertTrue(fooRef instanceof INodeReference.WithName);
@@ -1172,7 +1195,7 @@ public class TestRenameWithSnapshots {
     INodeDirectory fooDir = fooWC.getReferredINode().asDirectory();
     List<DirectoryDiff> diffs = fooDir.getDiffs().asList();
     assertEquals(1, diffs.size());
-    assertEquals("s2", diffs.get(0).snapshot.getRoot().getLocalName());
+    assertEquals(s2.getId(), diffs.get(0).getSnapshotId());
 
     // restart the cluster and check fsimage
     restartClusterAndCheckImage(true);
@@ -1260,7 +1283,7 @@ public class TestRenameWithSnapshots {
     INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
     INodeDirectory mockDir2 = spy(dir2);
     doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
-        (Snapshot) anyObject());
+        Mockito.anyInt());
     INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
     root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());
@@ -1271,12 +1294,14 @@ public class TestRenameWithSnapshots {
     // check the current internal details
     INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir
         .getINode4Write(sdir1.toString());
-    ReadOnlyList<INode> dir1Children = dir1Node.getChildrenList(null);
+    Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
+    ReadOnlyList<INode> dir1Children = dir1Node
+        .getChildrenList(Snapshot.CURRENT_STATE_ID);
     assertEquals(1, dir1Children.size());
     assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
     List<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
     assertEquals(1, dir1Diffs.size());
-    assertEquals("s1", dir1Diffs.get(0).snapshot.getRoot().getLocalName());
+    assertEquals(s1.getId(), dir1Diffs.get(0).getSnapshotId());
 
     // after the undo of rename, both the created and deleted list of sdir1
     // should be empty
@@ -1288,7 +1313,7 @@ public class TestRenameWithSnapshots {
     assertTrue(fooNode.isDirectory() && fooNode.asDirectory().isWithSnapshot());
     List<DirectoryDiff> fooDiffs = fooNode.asDirectory().getDiffs().asList();
     assertEquals(1, fooDiffs.size());
-    assertEquals("s1", fooDiffs.get(0).snapshot.getRoot().getLocalName());
+    assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
 
     final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
     INode fooNode_s1 = fsdir.getINode(foo_s1.toString());
@@ -1299,7 +1324,8 @@ public class TestRenameWithSnapshots {
     INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
         .asDirectory();
     assertFalse(dir2Node.isWithSnapshot());
-    ReadOnlyList<INode> dir2Children = dir2Node.getChildrenList(null);
+    ReadOnlyList<INode> dir2Children = dir2Node
+        .getChildrenList(Snapshot.CURRENT_STATE_ID);
     assertEquals(1, dir2Children.size());
     assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
   }
@@ -1327,7 +1353,7 @@ public class TestRenameWithSnapshots {
     INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
     INodeDirectory mockDir2 = spy(dir2);
     doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
-        (Snapshot) anyObject());
+        Mockito.anyInt());
     INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
     root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());
@@ -1338,12 +1364,14 @@ public class TestRenameWithSnapshots {
     // check the current internal details
     INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir
         .getINode4Write(sdir1.toString());
-    ReadOnlyList<INode> dir1Children = dir1Node.getChildrenList(null);
+    Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
+    ReadOnlyList<INode> dir1Children = dir1Node
+        .getChildrenList(Snapshot.CURRENT_STATE_ID);
     assertEquals(1, dir1Children.size());
     assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
     List<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
     assertEquals(1, dir1Diffs.size());
-    assertEquals("s1", dir1Diffs.get(0).snapshot.getRoot().getLocalName());
+    assertEquals(s1.getId(), dir1Diffs.get(0).getSnapshotId());
 
     // after the undo of rename, the created list of sdir1 should contain
     // 1 element
@@ -1363,7 +1391,8 @@ public class TestRenameWithSnapshots {
     INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
         .asDirectory();
     assertFalse(dir2Node.isWithSnapshot());
-    ReadOnlyList<INode> dir2Children = dir2Node.getChildrenList(null);
+    ReadOnlyList<INode> dir2Children = dir2Node
+        .getChildrenList(Snapshot.CURRENT_STATE_ID);
     assertEquals(1, dir2Children.size());
     assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
   }
@@ -1389,7 +1418,7 @@ public class TestRenameWithSnapshots {
     INodeDirectory dir3 = fsdir.getINode4Write(sdir3.toString()).asDirectory();
     INodeDirectory mockDir3 = spy(dir3);
     doReturn(false).when(mockDir3).addChild((INode) anyObject(), anyBoolean(),
-        (Snapshot) anyObject());
+        Mockito.anyInt());
     INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
     root.replaceChild(dir3, mockDir3, fsdir.getINodeMap());
@@ -1400,13 +1429,18 @@ public class TestRenameWithSnapshots {
     assertFalse(result);
 
     // check the current internal details
+    INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir
+        .getINode4Write(sdir1.toString());
+    Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
     INodeDirectorySnapshottable dir2Node = (INodeDirectorySnapshottable) fsdir
         .getINode4Write(sdir2.toString());
-    ReadOnlyList<INode> dir2Children = dir2Node.getChildrenList(null);
+    Snapshot s2 = dir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
+    ReadOnlyList<INode> dir2Children = dir2Node
+        .getChildrenList(Snapshot.CURRENT_STATE_ID);
     assertEquals(1, dir2Children.size());
     List<DirectoryDiff> dir2Diffs = dir2Node.getDiffs().asList();
     assertEquals(1, dir2Diffs.size());
-    assertEquals("s2", Snapshot.getSnapshotName(dir2Diffs.get(0).snapshot));
+    assertEquals(s2.getId(), dir2Diffs.get(0).getSnapshotId());
     ChildrenDiff childrenDiff = dir2Diffs.get(0).getChildrenDiff();
     assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
     assertEquals(1, childrenDiff.getList(ListType.CREATED).size());
@@ -1418,7 +1452,7 @@ public class TestRenameWithSnapshots {
     assertTrue(fooNode instanceof INodeReference.DstReference);
     List<DirectoryDiff> fooDiffs = fooNode.asDirectory().getDiffs().asList();
     assertEquals(1, fooDiffs.size());
-    assertEquals("s1", fooDiffs.get(0).snapshot.getRoot().getLocalName());
+    assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
 
     // create snapshot on sdir2 and rename again
     hdfs.createSnapshot(sdir2, "s3");
@@ -1428,13 +1462,14 @@ public class TestRenameWithSnapshots {
     // check internal details again
     dir2Node = (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir2
         .toString());
+    Snapshot s3 = dir2Node.getSnapshot(DFSUtil.string2Bytes("s3"));
     fooNode = fsdir.getINode4Write(foo_dir2.toString());
-    dir2Children = dir2Node.getChildrenList(null);
+    dir2Children = dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
     assertEquals(1, dir2Children.size());
     dir2Diffs = dir2Node.getDiffs().asList();
     assertEquals(2, dir2Diffs.size());
-    assertEquals("s2", Snapshot.getSnapshotName(dir2Diffs.get(0).snapshot));
-    assertEquals("s3", Snapshot.getSnapshotName(dir2Diffs.get(1).snapshot));
+    assertEquals(s2.getId(), dir2Diffs.get(0).getSnapshotId());
+    assertEquals(s3.getId(), dir2Diffs.get(1).getSnapshotId());
 
     childrenDiff = dir2Diffs.get(0).getChildrenDiff();
     assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
@@ -1452,8 +1487,8 @@ public class TestRenameWithSnapshots {
     assertTrue(fooNode instanceof INodeReference.DstReference);
     fooDiffs = fooNode.asDirectory().getDiffs().asList();
     assertEquals(2, fooDiffs.size());
-    assertEquals("s1", fooDiffs.get(0).snapshot.getRoot().getLocalName());
-    assertEquals("s3", fooDiffs.get(1).snapshot.getRoot().getLocalName());
+    assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
+    assertEquals(s3.getId(), fooDiffs.get(1).getSnapshotId());
   }
 
   /**
@@ -1489,9 +1524,9 @@ public class TestRenameWithSnapshots {
     INodeDirectory mockDir3 = spy(dir3);
     // fail the rename but succeed in undo
     doReturn(false).when(mockDir3).addChild((INode) Mockito.isNull(),
-        anyBoolean(), (Snapshot) anyObject());
+        anyBoolean(), Mockito.anyInt());
     Mockito.when(mockDir3.addChild((INode) Mockito.isNotNull(), anyBoolean(),
-        (Snapshot) anyObject())).thenReturn(false).thenCallRealMethod();
+        Mockito.anyInt())).thenReturn(false).thenCallRealMethod();
     INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
     root.replaceChild(dir3, mockDir3, fsdir.getINodeMap());
     foo3Node.setParent(mockDir3);
@@ -1551,7 +1586,7 @@ public class TestRenameWithSnapshots {
     INodeDirectory dir1Node = fsdir.getINode4Write(dir1.toString())
         .asDirectory();
     List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node
-        .getChildrenList(null));
+        .getChildrenList(Snapshot.CURRENT_STATE_ID));
     assertEquals(1, childrenList.size());
     INode fooNode = childrenList.get(0);
     assertTrue(fooNode.asDirectory().isWithSnapshot());
@@ -1572,7 +1607,7 @@ public class TestRenameWithSnapshots {
     assertEquals(3, counts.get(Quota.NAMESPACE));
     assertEquals(0, counts.get(Quota.DISKSPACE));
     childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory()
-        .getChildrenList(null));
+        .getChildrenList(Snapshot.CURRENT_STATE_ID));
     assertEquals(1, childrenList.size());
     INode subdir2Node = childrenList.get(0);
     assertSame(dir2Node, subdir2Node.getParent());
@@ -1627,7 +1662,7 @@ public class TestRenameWithSnapshots {
     INodeDirectory dir1Node = fsdir.getINode4Write(dir1.toString())
         .asDirectory();
     List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node
-        .getChildrenList(null));
+        .getChildrenList(Snapshot.CURRENT_STATE_ID));
     assertEquals(1, childrenList.size());
     INode fooNode = childrenList.get(0);
     assertTrue(fooNode.asDirectory().isWithSnapshot());
@@ -1646,7 +1681,7 @@ public class TestRenameWithSnapshots {
     assertEquals(4, counts.get(Quota.NAMESPACE));
     assertEquals(0, counts.get(Quota.DISKSPACE));
     childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory()
-        .getChildrenList(null));
+        .getChildrenList(Snapshot.CURRENT_STATE_ID));
     assertEquals(1, childrenList.size());
     INode subdir2Node = childrenList.get(0);
     assertTrue(subdir2Node.asDirectory().isWithSnapshot());
@@ -1690,14 +1725,18 @@ public class TestRenameWithSnapshots {
     }
 
     // check
+    INodeDirectorySnapshottable rootNode = (INodeDirectorySnapshottable) fsdir
+        .getINode4Write(root.toString());
     INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
-    ReadOnlyList<INode> children = fooNode.getChildrenList(null);
+    ReadOnlyList<INode> children = fooNode
+        .getChildrenList(Snapshot.CURRENT_STATE_ID);
     assertEquals(1, children.size());
     List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
     assertEquals(1, diffList.size());
     DirectoryDiff diff = diffList.get(0);
     // this diff is generated while renaming
-    assertEquals(snap1, Snapshot.getSnapshotName(diff.snapshot));
+    Snapshot s1 = rootNode.getSnapshot(DFSUtil.string2Bytes(snap1));
+    assertEquals(s1.getId(), diff.getSnapshotId());
     // after undo, the diff should be empty
     assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
     assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
@@ -1709,7 +1748,7 @@ public class TestRenameWithSnapshots {
     List<FileDiff> barDiffList = barNode.getDiffs().asList();
     assertEquals(1, barDiffList.size());
     FileDiff barDiff = barDiffList.get(0);
-    assertEquals(snap1, Snapshot.getSnapshotName(barDiff.snapshot));
+    assertEquals(s1.getId(), barDiff.getSnapshotId());
 
     // restart cluster multiple times to make sure the fsimage and edits log are
     // correct. Note that when loading fsimage, foo and bar will be converted
@@ -1941,12 +1980,14 @@ public class TestRenameWithSnapshots {
         (WithCount) fooRef.asReference().getReferredINode();
     assertEquals(1, wc.getReferenceCount());
     INodeDirectory fooNode = wc.getReferredINode().asDirectory();
-    ReadOnlyList<INode> children = fooNode.getChildrenList(null);
+    ReadOnlyList<INode> children = fooNode
+        .getChildrenList(Snapshot.CURRENT_STATE_ID);
     assertEquals(1, children.size());
     assertEquals(bar.getName(), children.get(0).getLocalName());
     List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
     assertEquals(1, diffList.size());
-    assertEquals("s1", Snapshot.getSnapshotName(diffList.get(0).snapshot));
+    Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
+    assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
     ChildrenDiff diff = diffList.get(0).getChildrenDiff();
     assertEquals(0, diff.getList(ListType.CREATED).size());
     assertEquals(0, diff.getList(ListType.DELETED).size());
@@ -2009,14 +2050,16 @@ public class TestRenameWithSnapshots {
         (WithCount) fooRef.asReference().getReferredINode();
     assertEquals(2, wc.getReferenceCount());
     INodeDirectory fooNode = wc.getReferredINode().asDirectory();
-    ReadOnlyList<INode> children = fooNode.getChildrenList(null);
+    ReadOnlyList<INode> children = fooNode
+        .getChildrenList(Snapshot.CURRENT_STATE_ID);
     assertEquals(3, children.size());
     assertEquals(bar.getName(), children.get(0).getLocalName());
     assertEquals(bar2.getName(), children.get(1).getLocalName());
    assertEquals(bar3.getName(), children.get(2).getLocalName());
     List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
     assertEquals(1, diffList.size());
-    assertEquals("s1", Snapshot.getSnapshotName(diffList.get(0).snapshot));
+    Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
+    assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
     ChildrenDiff diff = diffList.get(0).getChildrenDiff();
     // bar2 and bar3 in the created list
     assertEquals(2, diff.getList(ListType.CREATED).size());
@@ -2134,11 +2177,12 @@ public class TestRenameWithSnapshots {
     // recordModification before the rename
     assertTrue(fooNode.isWithSnapshot());
     assertTrue(fooNode.getDiffs().asList().isEmpty());
-    INodeDirectory barNode = fooNode.getChildrenList(null).get(0).asDirectory();
+    INodeDirectory barNode = fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID)
+        .get(0).asDirectory();
     // bar should also be INodeDirectory (With Snapshot), and both of its diff
     // list and children list are empty
     assertTrue(barNode.getDiffs().asList().isEmpty());
-    assertTrue(barNode.getChildrenList(null).isEmpty());
+    assertTrue(barNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
 
     restartClusterAndCheckImage(true);
   }
@@ -2210,7 +2254,10 @@ public class TestRenameWithSnapshots {
     List<DirectoryDiff> barDiffList = barNode.getDiffs().asList();
     assertEquals(1, barDiffList.size());
     DirectoryDiff diff = barDiffList.get(0);
-    assertEquals("s0", Snapshot.getSnapshotName(diff.snapshot));
+    INodeDirectorySnapshottable testNode =
+        (INodeDirectorySnapshottable) fsdir.getINode4Write(test.toString());
+    Snapshot s0 = testNode.getSnapshot(DFSUtil.string2Bytes("s0"));
+    assertEquals(s0.getId(), diff.getSnapshotId());
     // and file should be stored in the deleted list of this snapshot diff
     assertEquals("file", diff.getChildrenDiff().getList(ListType.DELETED)
         .get(0).getLocalName());
@@ -2276,7 +2323,7 @@ public class TestRenameWithSnapshots {
     final Path barInS0 = SnapshotTestHelper.getSnapshotPath(test, "s0",
         "foo/bar");
     INodeDirectory barNode = fsdir.getINode(barInS0.toString()).asDirectory();
-    assertEquals(0, barNode.getChildrenList(null).size());
+    assertEquals(0, barNode.getChildrenList(Snapshot.CURRENT_STATE_ID).size());
     List<DirectoryDiff> diffList = barNode.getDiffs().asList();
     assertEquals(1, diffList.size());
     DirectoryDiff diff = diffList.get(0);

View File

@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -153,7 +154,9 @@ public class TestSetQuotaWithSnapshot {
     assertTrue(subNode.asDirectory().isWithSnapshot());
     List<DirectoryDiff> diffList = subNode.asDirectory().getDiffs().asList();
     assertEquals(1, diffList.size());
-    assertEquals("s2", Snapshot.getSnapshotName(diffList.get(0).snapshot));
+    Snapshot s2 = ((INodeDirectorySnapshottable) dirNode).getSnapshot(DFSUtil
+        .string2Bytes("s2"));
+    assertEquals(s2.getId(), diffList.get(0).getSnapshotId());
     List<INode> createdList = diffList.get(0).getChildrenDiff().getList(ListType.CREATED);
     assertEquals(1, createdList.size());
     assertSame(fsdir.getINode4Write(file.toString()), createdList.get(0));

View File

@ -268,7 +268,8 @@ public class TestSnapshotDeletion {
(INodeDirectory) fsdir.getINode(snapshotNoChangeDir.toString()); (INodeDirectory) fsdir.getINode(snapshotNoChangeDir.toString());
// should still be an INodeDirectory // should still be an INodeDirectory
assertEquals(INodeDirectory.class, snapshotNode.getClass()); assertEquals(INodeDirectory.class, snapshotNode.getClass());
ReadOnlyList<INode> children = snapshotNode.getChildrenList(null); ReadOnlyList<INode> children = snapshotNode
.getChildrenList(Snapshot.CURRENT_STATE_ID);
// check 2 children: noChangeFile and metaChangeFile2 // check 2 children: noChangeFile and metaChangeFile2
assertEquals(2, children.size()); assertEquals(2, children.size());
INode noChangeFileSCopy = children.get(1); INode noChangeFileSCopy = children.get(1);
@ -286,11 +287,11 @@ public class TestSnapshotDeletion {
// check the replication factor of metaChangeFile2SCopy // check the replication factor of metaChangeFile2SCopy
assertEquals(REPLICATION_1, assertEquals(REPLICATION_1,
metaChangeFile2SCopy.getFileReplication(null)); metaChangeFile2SCopy.getFileReplication(Snapshot.CURRENT_STATE_ID));
assertEquals(REPLICATION_1, assertEquals(REPLICATION_1,
metaChangeFile2SCopy.getFileReplication(snapshot1)); metaChangeFile2SCopy.getFileReplication(snapshot1.getId()));
assertEquals(REPLICATION, assertEquals(REPLICATION,
metaChangeFile2SCopy.getFileReplication(snapshot0)); metaChangeFile2SCopy.getFileReplication(snapshot0.getId()));
// Case 4: delete directory sub // Case 4: delete directory sub
// before deleting sub, we first create a new file under sub // before deleting sub, we first create a new file under sub
@ -316,23 +317,25 @@ public class TestSnapshotDeletion {
assertTrue(snapshotNode4Sub.isWithSnapshot()); assertTrue(snapshotNode4Sub.isWithSnapshot());
// the snapshot copy of sub has only one child subsub. // the snapshot copy of sub has only one child subsub.
// newFile should have been destroyed // newFile should have been destroyed
assertEquals(1, snapshotNode4Sub.getChildrenList(null).size()); assertEquals(1, snapshotNode4Sub.getChildrenList(Snapshot.CURRENT_STATE_ID)
.size());
// but should have two children, subsub and noChangeDir, when s1 was taken // but should have two children, subsub and noChangeDir, when s1 was taken
assertEquals(2, snapshotNode4Sub.getChildrenList(snapshot1).size()); assertEquals(2, snapshotNode4Sub.getChildrenList(snapshot1.getId()).size());
// check the snapshot copy of subsub, which is contained in the subtree of // check the snapshot copy of subsub, which is contained in the subtree of
// sub's snapshot copy // sub's snapshot copy
INode snapshotNode4Subsub = snapshotNode4Sub.getChildrenList(null).get(0); INode snapshotNode4Subsub = snapshotNode4Sub.getChildrenList(
Snapshot.CURRENT_STATE_ID).get(0);
assertTrue(snapshotNode4Subsub.asDirectory().isWithSnapshot()); assertTrue(snapshotNode4Subsub.asDirectory().isWithSnapshot());
assertTrue(snapshotNode4Sub == snapshotNode4Subsub.getParent()); assertTrue(snapshotNode4Sub == snapshotNode4Subsub.getParent());
// check the children of subsub // check the children of subsub
INodeDirectory snapshotSubsubDir = (INodeDirectory) snapshotNode4Subsub; INodeDirectory snapshotSubsubDir = (INodeDirectory) snapshotNode4Subsub;
children = snapshotSubsubDir.getChildrenList(null); children = snapshotSubsubDir.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(2, children.size()); assertEquals(2, children.size());
assertEquals(children.get(0).getLocalName(), metaChangeFile1.getName()); assertEquals(children.get(0).getLocalName(), metaChangeFile1.getName());
assertEquals(children.get(1).getLocalName(), newFileAfterS0.getName()); assertEquals(children.get(1).getLocalName(), newFileAfterS0.getName());
// only one child before snapshot s0 // only one child before snapshot s0
children = snapshotSubsubDir.getChildrenList(snapshot0); children = snapshotSubsubDir.getChildrenList(snapshot0.getId());
assertEquals(1, children.size()); assertEquals(1, children.size());
INode child = children.get(0); INode child = children.get(0);
assertEquals(child.getLocalName(), metaChangeFile1.getName()); assertEquals(child.getLocalName(), metaChangeFile1.getName());
@ -341,11 +344,11 @@ public class TestSnapshotDeletion {
assertTrue(metaChangeFile1SCopy.isWithSnapshot()); assertTrue(metaChangeFile1SCopy.isWithSnapshot());
assertFalse(metaChangeFile1SCopy.isUnderConstruction()); assertFalse(metaChangeFile1SCopy.isUnderConstruction());
assertEquals(REPLICATION_1, assertEquals(REPLICATION_1,
metaChangeFile1SCopy.getFileReplication(null)); metaChangeFile1SCopy.getFileReplication(Snapshot.CURRENT_STATE_ID));
assertEquals(REPLICATION_1, assertEquals(REPLICATION_1,
metaChangeFile1SCopy.getFileReplication(snapshot1)); metaChangeFile1SCopy.getFileReplication(snapshot1.getId()));
assertEquals(REPLICATION, assertEquals(REPLICATION,
metaChangeFile1SCopy.getFileReplication(snapshot0)); metaChangeFile1SCopy.getFileReplication(snapshot0.getId()));
} }
/** /**
@ -474,9 +477,10 @@ public class TestSnapshotDeletion {
(INodeDirectorySnapshottable) fsdir.getINode(dir.toString()); (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
Snapshot snapshot0 = dirNode.getSnapshot(DFSUtil.string2Bytes("s0")); Snapshot snapshot0 = dirNode.getSnapshot(DFSUtil.string2Bytes("s0"));
assertNull(snapshot0); assertNull(snapshot0);
Snapshot snapshot1 = dirNode.getSnapshot(DFSUtil.string2Bytes("s1"));
DirectoryDiffList diffList = dirNode.getDiffs(); DirectoryDiffList diffList = dirNode.getDiffs();
assertEquals(1, diffList.asList().size()); assertEquals(1, diffList.asList().size());
assertEquals("s1", diffList.getLast().snapshot.getRoot().getLocalName()); assertEquals(snapshot1.getId(), diffList.getLast().getSnapshotId());
diffList = fsdir.getINode(metaChangeDir.toString()).asDirectory() diffList = fsdir.getINode(metaChangeDir.toString()).asDirectory()
.getDiffs(); .getDiffs();
assertEquals(0, diffList.asList().size()); assertEquals(0, diffList.asList().size());
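All of the snapshot hunks above make the same API move: callers now pass an
integer snapshot id rather than a Snapshot object (with null meaning "current
state"). A minimal sketch of the new call shape, reusing names from the test
above (dirNode, metaChangeFile2SCopy):

    // Resolve a snapshot by name, then query state as of that snapshot's id.
    Snapshot s1 = dirNode.getSnapshot(DFSUtil.string2Bytes("s1"));
    short replAtS1 = metaChangeFile2SCopy.getFileReplication(s1.getId());
    // The live state is addressed by a sentinel id instead of null.
    short replNow = metaChangeFile2SCopy.getFileReplication(Snapshot.CURRENT_STATE_ID);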

View File

@ -101,7 +101,8 @@ public class TestSnapshotRename {
List<DirectoryDiff> listByTime = srcRoot.getDiffs().asList(); List<DirectoryDiff> listByTime = srcRoot.getDiffs().asList();
assertEquals(names.length, listByTime.size()); assertEquals(names.length, listByTime.size());
for (int i = 0; i < listByTime.size(); i++) { for (int i = 0; i < listByTime.size(); i++) {
assertEquals(names[i], listByTime.get(i).getSnapshot().getRoot().getLocalName()); Snapshot s = srcRoot.getSnapshotById(listByTime.get(i).getSnapshotId());
assertEquals(names[i], s.getRoot().getLocalName());
} }
} }

View File

@ -150,7 +150,7 @@ public class TestSnapshotReplication {
assertEquals(expectedBlockRep, ssInode.getBlockReplication()); assertEquals(expectedBlockRep, ssInode.getBlockReplication());
// Also check the number derived from INodeFile#getFileReplication // Also check the number derived from INodeFile#getFileReplication
assertEquals(snapshotRepMap.get(ss).shortValue(), assertEquals(snapshotRepMap.get(ss).shortValue(),
ssInode.getFileReplication(iip.getPathSnapshot())); ssInode.getFileReplication(iip.getPathSnapshotId()));
} }
} }

View File

@ -26,8 +26,6 @@ import java.io.FileOutputStream;
import java.io.IOException; import java.io.IOException;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
import java.nio.channels.FileChannel; import java.nio.channels.FileChannel;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
@ -36,168 +34,142 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
import org.apache.hadoop.hdfs.server.namenode.OfflineEditsViewerHelper; import org.apache.hadoop.hdfs.server.namenode.OfflineEditsViewerHelper;
import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer.Flags; import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer.Flags;
import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.test.PathUtils;
import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Rule;
import org.junit.Test; import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import com.google.common.collect.ImmutableSet;
public class TestOfflineEditsViewer { public class TestOfflineEditsViewer {
private static final Log LOG = LogFactory.getLog(TestOfflineEditsViewer.class); private static final Log LOG = LogFactory
.getLog(TestOfflineEditsViewer.class);
private static final Map<FSEditLogOpCodes, Boolean> obsoleteOpCodes = private static String buildDir = PathUtils
new HashMap<FSEditLogOpCodes, Boolean>(); .getTestDirName(TestOfflineEditsViewer.class);
private static final Map<FSEditLogOpCodes, Boolean> missingOpCodes =
new HashMap<FSEditLogOpCodes, Boolean>();
static {
initializeObsoleteOpCodes();
initializeMissingOpCodes();
}
private static String buildDir =
PathUtils.getTestDirName(TestOfflineEditsViewer.class);
private static String cacheDir =
System.getProperty("test.cache.data", "build/test/cache");
// to create edits and get edits filename // to create edits and get edits filename
private static final OfflineEditsViewerHelper nnHelper private static final OfflineEditsViewerHelper nnHelper = new OfflineEditsViewerHelper();
= new OfflineEditsViewerHelper(); private static final ImmutableSet<FSEditLogOpCodes> skippedOps = skippedOps();
/**
* Initialize obsoleteOpCodes
*
* Reason for suppressing "deprecation" warnings:
*
* These are the opcodes that are not used anymore, some
* are marked deprecated, we need to include them here to make
* sure we exclude them when checking for completeness of testing,
* that's why the "deprecation" warnings are suppressed.
*/
@SuppressWarnings("deprecation") @SuppressWarnings("deprecation")
private static void initializeObsoleteOpCodes() { private static ImmutableSet<FSEditLogOpCodes> skippedOps() {
obsoleteOpCodes.put(FSEditLogOpCodes.OP_DATANODE_ADD, true); ImmutableSet.Builder<FSEditLogOpCodes> b = ImmutableSet
obsoleteOpCodes.put(FSEditLogOpCodes.OP_DATANODE_REMOVE, true); .<FSEditLogOpCodes> builder();
obsoleteOpCodes.put(FSEditLogOpCodes.OP_SET_NS_QUOTA, true);
obsoleteOpCodes.put(FSEditLogOpCodes.OP_CLEAR_NS_QUOTA, true); // Deprecated opcodes
b.add(FSEditLogOpCodes.OP_DATANODE_ADD)
.add(FSEditLogOpCodes.OP_DATANODE_REMOVE)
.add(FSEditLogOpCodes.OP_SET_NS_QUOTA)
.add(FSEditLogOpCodes.OP_CLEAR_NS_QUOTA)
.add(FSEditLogOpCodes.OP_SET_GENSTAMP_V1);
// Cannot test delegation token related code in insecure set up
b.add(FSEditLogOpCodes.OP_GET_DELEGATION_TOKEN)
.add(FSEditLogOpCodes.OP_RENEW_DELEGATION_TOKEN)
.add(FSEditLogOpCodes.OP_CANCEL_DELEGATION_TOKEN);
// Skip invalid opcode
b.add(FSEditLogOpCodes.OP_INVALID);
return b.build();
} }
/** @Rule
* Initialize missingOpcodes public final TemporaryFolder folder = new TemporaryFolder();
*
* Opcodes that are not available except after upgrade from
* an older version. We don't test these here.
*/
private static void initializeMissingOpCodes() {
obsoleteOpCodes.put(FSEditLogOpCodes.OP_SET_GENSTAMP_V1, true);
}
@Before @Before
public void setup() { public void setUp() throws IOException {
new File(cacheDir).mkdirs(); nnHelper.startCluster(buildDir + "/dfs/");
} }
@After
public void tearDown() throws IOException {
nnHelper.shutdownCluster();
}
/** /**
* Test the OfflineEditsViewer * Test the OfflineEditsViewer
*/ */
@Test @Test
public void testGenerated() throws IOException { public void testGenerated() throws IOException {
LOG.info("START - testing with generated edits");
nnHelper.startCluster(buildDir + "/dfs/");
// edits generated by nnHelper (MiniDFSCluster), should have all op codes // edits generated by nnHelper (MiniDFSCluster), should have all op codes
// binary, XML, reparsed binary // binary, XML, reparsed binary
String edits = nnHelper.generateEdits(); String edits = nnHelper.generateEdits();
String editsParsedXml = cacheDir + "/editsParsed.xml"; String editsParsedXml = folder.newFile("editsParsed.xml").getAbsolutePath();
String editsReparsed = cacheDir + "/editsReparsed"; String editsReparsed = folder.newFile("editsParsed").getAbsolutePath();
// parse to XML then back to binary // parse to XML then back to binary
assertEquals(0, runOev(edits, editsParsedXml, "xml", false)); assertEquals(0, runOev(edits, editsParsedXml, "xml", false));
assertEquals(0, runOev(editsParsedXml, editsReparsed, "binary", false)); assertEquals(0, runOev(editsParsedXml, editsReparsed, "binary", false));
// judgment time // judgment time
assertTrue("Edits " + edits + " should have all op codes",
hasAllOpCodes(edits));
LOG.info("Comparing generated file " + editsReparsed
+ " with reference file " + edits);
assertTrue( assertTrue(
"Edits " + edits + " should have all op codes", "Generated edits and reparsed (bin to XML to bin) should be same",
hasAllOpCodes(edits)); filesEqualIgnoreTrailingZeros(edits, editsReparsed));
LOG.info("Comparing generated file " + editsReparsed +
" with reference file " + edits);
assertTrue(
"Generated edits and reparsed (bin to XML to bin) should be same",
filesEqualIgnoreTrailingZeros(edits, editsReparsed));
// removes edits so do this at the end
nnHelper.shutdownCluster();
LOG.info("END");
} }
@Test @Test
public void testRecoveryMode() throws IOException { public void testRecoveryMode() throws IOException {
LOG.info("START - testing with generated edits");
nnHelper.startCluster(buildDir + "/dfs/");
// edits generated by nnHelper (MiniDFSCluster), should have all op codes // edits generated by nnHelper (MiniDFSCluster), should have all op codes
// binary, XML, reparsed binary // binary, XML, reparsed binary
String edits = nnHelper.generateEdits(); String edits = nnHelper.generateEdits();
FileOutputStream os = new FileOutputStream(edits, true);
// Corrupt the file by truncating the end // Corrupt the file by truncating the end
FileChannel editsFile = new FileOutputStream(edits, true).getChannel(); FileChannel editsFile = os.getChannel();
editsFile.truncate(editsFile.size() - 5); editsFile.truncate(editsFile.size() - 5);
String editsParsedXml = cacheDir + "/editsRecoveredParsed.xml"; String editsParsedXml = folder.newFile("editsRecoveredParsed.xml")
String editsReparsed = cacheDir + "/editsRecoveredReparsed"; .getAbsolutePath();
String editsParsedXml2 = cacheDir + "/editsRecoveredParsed2.xml"; String editsReparsed = folder.newFile("editsRecoveredReparsed")
.getAbsolutePath();
String editsParsedXml2 = folder.newFile("editsRecoveredParsed2.xml")
.getAbsolutePath();
// Can't read the corrupted file without recovery mode // Can't read the corrupted file without recovery mode
assertEquals(-1, runOev(edits, editsParsedXml, "xml", false)); assertEquals(-1, runOev(edits, editsParsedXml, "xml", false));
// parse to XML then back to binary // parse to XML then back to binary
assertEquals(0, runOev(edits, editsParsedXml, "xml", true)); assertEquals(0, runOev(edits, editsParsedXml, "xml", true));
assertEquals(0, runOev(editsParsedXml, editsReparsed, "binary", false)); assertEquals(0, runOev(editsParsedXml, editsReparsed, "binary", false));
assertEquals(0, runOev(editsReparsed, editsParsedXml2, "xml", false)); assertEquals(0, runOev(editsReparsed, editsParsedXml2, "xml", false));
// judgment time // judgment time
assertTrue("Test round trip", assertTrue("Test round trip",
filesEqualIgnoreTrailingZeros(editsParsedXml, editsParsedXml2)); filesEqualIgnoreTrailingZeros(editsParsedXml, editsParsedXml2));
// removes edits so do this at the end os.close();
nnHelper.shutdownCluster();
LOG.info("END");
} }
@Test @Test
public void testStored() throws IOException { public void testStored() throws IOException {
LOG.info("START - testing with stored reference edits");
// reference edits stored with source code (see build.xml) // reference edits stored with source code (see build.xml)
final String cacheDir = System.getProperty("test.cache.data",
"build/test/cache");
// binary, XML, reparsed binary // binary, XML, reparsed binary
String editsStored = cacheDir + "/editsStored"; String editsStored = cacheDir + "/editsStored";
String editsStoredParsedXml = cacheDir + "/editsStoredParsed.xml"; String editsStoredParsedXml = cacheDir + "/editsStoredParsed.xml";
String editsStoredReparsed = cacheDir + "/editsStoredReparsed"; String editsStoredReparsed = cacheDir + "/editsStoredReparsed";
// reference XML version of editsStored (see build.xml) // reference XML version of editsStored (see build.xml)
String editsStoredXml = cacheDir + "/editsStored.xml"; String editsStoredXml = cacheDir + "/editsStored.xml";
// parse to XML then back to binary // parse to XML then back to binary
assertEquals(0, runOev(editsStored, editsStoredParsedXml, "xml", false)); assertEquals(0, runOev(editsStored, editsStoredParsedXml, "xml", false));
assertEquals(0, runOev(editsStoredParsedXml, editsStoredReparsed, assertEquals(0,
"binary", false)); runOev(editsStoredParsedXml, editsStoredReparsed, "binary", false));
// judgment time // judgment time
assertTrue("Edits " + editsStored + " should have all op codes",
hasAllOpCodes(editsStored));
assertTrue("Reference XML edits and parsed to XML should be same",
filesEqual(editsStoredXml, editsStoredParsedXml));
assertTrue( assertTrue(
"Edits " + editsStored + " should have all op codes", "Reference edits and reparsed (bin to XML to bin) should be same",
hasAllOpCodes(editsStored)); filesEqualIgnoreTrailingZeros(editsStored, editsStoredReparsed));
assertTrue(
"Reference XML edits and parsed to XML should be same",
filesEqual(editsStoredXml, editsStoredParsedXml));
assertTrue(
"Reference edits and reparsed (bin to XML to bin) should be same",
filesEqualIgnoreTrailingZeros(editsStored, editsStoredReparsed));
LOG.info("END");
} }
/** /**
@ -233,22 +205,17 @@ public class TestOfflineEditsViewer {
OfflineEditsViewer oev = new OfflineEditsViewer(); OfflineEditsViewer oev = new OfflineEditsViewer();
if (oev.go(inFilename, outFilename, "stats", new Flags(), visitor) != 0) if (oev.go(inFilename, outFilename, "stats", new Flags(), visitor) != 0)
return false; return false;
LOG.info("Statistics for " + inFilename + "\n" + LOG.info("Statistics for " + inFilename + "\n"
visitor.getStatisticsString()); + visitor.getStatisticsString());
boolean hasAllOpCodes = true; boolean hasAllOpCodes = true;
for(FSEditLogOpCodes opCode : FSEditLogOpCodes.values()) { for (FSEditLogOpCodes opCode : FSEditLogOpCodes.values()) {
// don't need to test obsolete opCodes // don't need to test obsolete opCodes
if(obsoleteOpCodes.containsKey(opCode)) { if (skippedOps.contains(opCode))
continue; continue;
} else if (missingOpCodes.containsKey(opCode)) {
continue;
} else if (opCode == FSEditLogOpCodes.OP_INVALID) {
continue;
}
Long count = visitor.getStatistics().get(opCode); Long count = visitor.getStatistics().get(opCode);
if((count == null) || (count == 0)) { if ((count == null) || (count == 0)) {
hasAllOpCodes = false; hasAllOpCodes = false;
LOG.info("Opcode " + opCode + " not tested in " + inFilename); LOG.info("Opcode " + opCode + " not tested in " + inFilename);
} }
@ -257,9 +224,9 @@ public class TestOfflineEditsViewer {
} }
/** /**
* Compare two files, ignore trailing zeros at the end, * Compare two files, ignore trailing zeros at the end, for edits log the
* for edits log the trailing zeros do not make any difference, * trailing zeros do not make any difference, throw exception if the files are
* throw exception if the files are not the same * not the same
* *
* @param filenameSmall first file to compare (doesn't have to be smaller) * @param filenameSmall first file to compare (doesn't have to be smaller)
* @param filenameLarge second file to compare (doesn't have to be larger) * @param filenameLarge second file to compare (doesn't have to be larger)
@ -271,7 +238,7 @@ public class TestOfflineEditsViewer {
ByteBuffer large = ByteBuffer.wrap(DFSTestUtil.loadFile(filenameLarge)); ByteBuffer large = ByteBuffer.wrap(DFSTestUtil.loadFile(filenameLarge));
// now correct if it's otherwise // now correct if it's otherwise
if(small.capacity() > large.capacity()) { if (small.capacity() > large.capacity()) {
ByteBuffer tmpByteBuffer = small; ByteBuffer tmpByteBuffer = small;
small = large; small = large;
large = tmpByteBuffer; large = tmpByteBuffer;
@ -288,13 +255,15 @@ public class TestOfflineEditsViewer {
large.limit(small.capacity()); large.limit(small.capacity());
// compares position to limit // compares position to limit
if(!small.equals(large)) { return false; } if (!small.equals(large)) {
return false;
}
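// (The padding left by edit-log preallocation is OP_INVALID bytes, i.e. 0xFF.)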
// everything after limit should be 0xFF // everything after limit should be 0xFF
int i = large.limit(); int i = large.limit();
large.clear(); large.clear();
for(; i < large.capacity(); i++) { for (; i < large.capacity(); i++) {
if(large.get(i) != FSEditLogOpCodes.OP_INVALID.getOpCode()) { if (large.get(i) != FSEditLogOpCodes.OP_INVALID.getOpCode()) {
return false; return false;
} }
} }

View File

@ -196,6 +196,8 @@ Release 2.4.0 - UNRELEASED
MAPREDUCE-5550. Task Status message (reporter.setStatus) not shown in UI MAPREDUCE-5550. Task Status message (reporter.setStatus) not shown in UI
with Hadoop 2.0 (Gera Shegalov via Sandy Ryza) with Hadoop 2.0 (Gera Shegalov via Sandy Ryza)
MAPREDUCE-3310. Custom grouping comparator cannot be set for Combiners (tucu)
OPTIMIZATIONS OPTIMIZATIONS
MAPREDUCE-5484. YarnChild unnecessarily loads job conf twice (Sandy Ryza) MAPREDUCE-5484. YarnChild unnecessarily loads job conf twice (Sandy Ryza)
@ -267,6 +269,9 @@ Release 2.4.0 - UNRELEASED
MAPREDUCE-5685. Fixed a bug with JobContext getCacheFiles API inside the MAPREDUCE-5685. Fixed a bug with JobContext getCacheFiles API inside the
WrappedReducer class. (Yi Song via vinodkv) WrappedReducer class. (Yi Song via vinodkv)
MAPREDUCE-5689. MRAppMaster does not preempt reducers when scheduled maps
cannot be fulfilled. (lohit via kasha)
Release 2.3.0 - UNRELEASED Release 2.3.0 - UNRELEASED
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES

View File

@ -229,7 +229,8 @@ public class RMContainerAllocator extends RMContainerRequestor
int completedMaps = getJob().getCompletedMaps(); int completedMaps = getJob().getCompletedMaps();
int completedTasks = completedMaps + getJob().getCompletedReduces(); int completedTasks = completedMaps + getJob().getCompletedReduces();
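// Recalculate not only when tasks complete but also while maps are still
// waiting to be scheduled, so reducers can be ramped down to make room
// for them (MAPREDUCE-5689).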
if (lastCompletedTasks != completedTasks) { if ((lastCompletedTasks != completedTasks) ||
(scheduledRequests.maps.size() > 0)) {
lastCompletedTasks = completedTasks; lastCompletedTasks = completedTasks;
recalculateReduceSchedule = true; recalculateReduceSchedule = true;
} }

View File

@ -1604,6 +1604,21 @@ public class TestRMContainerAllocator {
numPendingReduces, numPendingReduces,
maxReduceRampupLimit, reduceSlowStart); maxReduceRampupLimit, reduceSlowStart);
verify(allocator).rampDownReduces(anyInt()); verify(allocator).rampDownReduces(anyInt());
// Test reduce ramp-down for when there are scheduled maps
// Since we have two scheduled Maps, rampDownReducers
// should be invoked twice.
scheduledMaps = 2;
assignedReduces = 2;
doReturn(10 * 1024).when(allocator).getMemLimit();
allocator.scheduleReduces(
totalMaps, succeededMaps,
scheduledMaps, scheduledReduces,
assignedMaps, assignedReduces,
mapResourceReqt, reduceResourceReqt,
numPendingReduces,
maxReduceRampupLimit, reduceSlowStart);
verify(allocator, times(2)).rampDownReduces(anyInt());
} }
private static class RecalculateContainerAllocator extends MyContainerAllocator { private static class RecalculateContainerAllocator extends MyContainerAllocator {

View File

@ -949,12 +949,29 @@ public class JobConf extends Configuration {
return get(KeyFieldBasedPartitioner.PARTITIONER_OPTIONS); return get(KeyFieldBasedPartitioner.PARTITIONER_OPTIONS);
} }
/**
* Get the user defined {@link WritableComparable} comparator for
* grouping keys of inputs to the combiner.
*
* @return comparator set by the user for grouping values.
* @see #setCombinerKeyGroupingComparator(Class) for details.
*/
public RawComparator getCombinerKeyGroupingComparator() {
Class<? extends RawComparator> theClass = getClass(
JobContext.COMBINER_GROUP_COMPARATOR_CLASS, null, RawComparator.class);
if (theClass == null) {
return getOutputKeyComparator();
}
return ReflectionUtils.newInstance(theClass, this);
}
/** /**
* Get the user defined {@link WritableComparable} comparator for * Get the user defined {@link WritableComparable} comparator for
* grouping keys of inputs to the reduce. * grouping keys of inputs to the reduce.
* *
* @return comparator set by the user for grouping values. * @return comparator set by the user for grouping values.
* @see #setOutputValueGroupingComparator(Class) for details. * @see #setOutputValueGroupingComparator(Class) for details.
*/ */
public RawComparator getOutputValueGroupingComparator() { public RawComparator getOutputValueGroupingComparator() {
Class<? extends RawComparator> theClass = getClass( Class<? extends RawComparator> theClass = getClass(
@ -966,6 +983,37 @@ public class JobConf extends Configuration {
return ReflectionUtils.newInstance(theClass, this); return ReflectionUtils.newInstance(theClass, this);
} }
/**
* Set the user defined {@link RawComparator} comparator for
* grouping keys in the input to the combiner.
* <p/>
* <p>This comparator should be provided if the equivalence rules for keys
* for sorting the intermediates are different from those for grouping keys
* before each call to
* {@link Reducer#reduce(Object, java.util.Iterator, OutputCollector, Reporter)}.</p>
* <p/>
* <p>For key-value pairs (K1,V1) and (K2,V2), the values (V1, V2) are passed
* in a single call to the reduce function if K1 and K2 compare as equal.</p>
* <p/>
* <p>Since {@link #setOutputKeyComparatorClass(Class)} can be used to control
* how keys are sorted, this can be used in conjunction to simulate
* <i>secondary sort on values</i>.</p>
* <p/>
* <p><i>Note</i>: This is not a guarantee of the combiner sort being
* <i>stable</i> in any sense. (In any case, with the order of available
* map-outputs to the combiner being non-deterministic, it wouldn't make
* that much sense.)</p>
*
* @param theClass the comparator class to be used for grouping keys for the
* combiner. It should implement <code>RawComparator</code>.
* @see #setOutputKeyComparatorClass(Class)
*/
public void setCombinerKeyGroupingComparator(
Class<? extends RawComparator> theClass) {
setClass(JobContext.COMBINER_GROUP_COMPARATOR_CLASS,
theClass, RawComparator.class);
}
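A minimal wiring sketch for the old mapred API (MaxCombiner and
FirstFieldComparator are hypothetical classes, not part of this change):

    JobConf job = new JobConf();
    job.setCombinerClass(MaxCombiner.class);
    // Group map-output keys for the combiner the same way the reducer will,
    // e.g. on a key prefix, while the full key still drives the sort order.
    job.setCombinerKeyGroupingComparator(FirstFieldComparator.class);
    job.setOutputValueGroupingComparator(FirstFieldComparator.class);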
/** /**
* Set the user defined {@link RawComparator} comparator for * Set the user defined {@link RawComparator} comparator for
* grouping keys in the input to the reduce. * grouping keys in the input to the reduce.
@ -989,7 +1037,8 @@ public class JobConf extends Configuration {
* *
* @param theClass the comparator class to be used for grouping keys. * @param theClass the comparator class to be used for grouping keys.
* It should implement <code>RawComparator</code>. * It should implement <code>RawComparator</code>.
* @see #setOutputKeyComparatorClass(Class) * @see #setOutputKeyComparatorClass(Class)
* @see #setCombinerKeyGroupingComparator(Class)
*/ */
public void setOutputValueGroupingComparator( public void setOutputValueGroupingComparator(
Class<? extends RawComparator> theClass) { Class<? extends RawComparator> theClass) {

View File

@ -1575,7 +1575,8 @@ abstract public class Task implements Writable, Configurable {
combinerClass = cls; combinerClass = cls;
keyClass = (Class<K>) job.getMapOutputKeyClass(); keyClass = (Class<K>) job.getMapOutputKeyClass();
valueClass = (Class<V>) job.getMapOutputValueClass(); valueClass = (Class<V>) job.getMapOutputValueClass();
comparator = (RawComparator<K>) job.getOutputKeyComparator(); comparator = (RawComparator<K>)
job.getCombinerKeyGroupingComparator();
} }
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
@ -1624,7 +1625,7 @@ abstract public class Task implements Writable, Configurable {
this.taskId = taskId; this.taskId = taskId;
keyClass = (Class<K>) context.getMapOutputKeyClass(); keyClass = (Class<K>) context.getMapOutputKeyClass();
valueClass = (Class<V>) context.getMapOutputValueClass(); valueClass = (Class<V>) context.getMapOutputValueClass();
comparator = (RawComparator<K>) context.getSortComparator(); comparator = (RawComparator<K>) context.getCombinerKeyGroupingComparator();
this.committer = committer; this.committer = committer;
} }

View File

@ -948,11 +948,27 @@ public class Job extends JobContextImpl implements JobContext {
conf.setOutputValueClass(theClass); conf.setOutputValueClass(theClass);
} }
/**
* Define the comparator that controls which keys are grouped together
* for a single call to combiner,
* {@link Reducer#reduce(Object, Iterable,
* org.apache.hadoop.mapreduce.Reducer.Context)}
*
* @param cls the raw comparator to use
* @throws IllegalStateException if the job is submitted
*/
public void setCombinerKeyGroupingComparatorClass(
Class<? extends RawComparator> cls) throws IllegalStateException {
ensureState(JobState.DEFINE);
conf.setCombinerKeyGroupingComparator(cls);
}
/** /**
* Define the comparator that controls how the keys are sorted before they * Define the comparator that controls how the keys are sorted before they
* are passed to the {@link Reducer}. * are passed to the {@link Reducer}.
* @param cls the raw comparator * @param cls the raw comparator
* @throws IllegalStateException if the job is submitted * @throws IllegalStateException if the job is submitted
* @see #setCombinerKeyGroupingComparatorClass(Class)
*/ */
public void setSortComparatorClass(Class<? extends RawComparator> cls public void setSortComparatorClass(Class<? extends RawComparator> cls
) throws IllegalStateException { ) throws IllegalStateException {
@ -967,6 +983,7 @@ public class Job extends JobContextImpl implements JobContext {
* org.apache.hadoop.mapreduce.Reducer.Context)} * org.apache.hadoop.mapreduce.Reducer.Context)}
* @param cls the raw comparator to use * @param cls the raw comparator to use
* @throws IllegalStateException if the job is submitted * @throws IllegalStateException if the job is submitted
* @see #setCombinerKeyGroupingComparatorClass(Class)
*/ */
public void setGroupingComparatorClass(Class<? extends RawComparator> cls public void setGroupingComparatorClass(Class<? extends RawComparator> cls
) throws IllegalStateException { ) throws IllegalStateException {

View File

@ -167,13 +167,23 @@ public interface JobContext extends MRJobConfig {
*/ */
public String getJar(); public String getJar();
/** /**
* Get the user defined {@link RawComparator} comparator for * Get the user defined {@link RawComparator} comparator for
* grouping keys of inputs to the reduce. * grouping keys of inputs to the combiner.
* *
* @return comparator set by the user for grouping values. * @return comparator set by the user for grouping values.
* @see Job#setGroupingComparatorClass(Class) for details. * @see Job#setCombinerKeyGroupingComparatorClass(Class)
*/ */
public RawComparator<?> getCombinerKeyGroupingComparator();
/**
* Get the user defined {@link RawComparator} comparator for
* grouping keys of inputs to the reduce.
*
* @return comparator set by the user for grouping values.
* @see Job#setGroupingComparatorClass(Class)
* @see #getCombinerKeyGroupingComparator()
*/
public RawComparator<?> getGroupingComparator(); public RawComparator<?> getGroupingComparator();
/** /**

View File

@ -93,6 +93,8 @@ public interface MRJobConfig {
public static final String KEY_COMPARATOR = "mapreduce.job.output.key.comparator.class"; public static final String KEY_COMPARATOR = "mapreduce.job.output.key.comparator.class";
public static final String COMBINER_GROUP_COMPARATOR_CLASS = "mapreduce.job.combiner.group.comparator.class";
public static final String GROUP_COMPARATOR_CLASS = "mapreduce.job.output.group.comparator.class"; public static final String GROUP_COMPARATOR_CLASS = "mapreduce.job.output.group.comparator.class";
public static final String WORKING_DIR = "mapreduce.job.working.dir"; public static final String WORKING_DIR = "mapreduce.job.working.dir";
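The new key can also be set directly on the configuration; a sketch
(MyGroupComparator is a hypothetical RawComparator implementation):

    Configuration conf = new Configuration();
    conf.set(MRJobConfig.COMBINER_GROUP_COMPARATOR_CLASS,
        MyGroupComparator.class.getName());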

View File

@ -166,6 +166,11 @@ class ChainMapContextImpl<KEYIN, VALUEIN, KEYOUT, VALUEOUT> implements
return base.getFileTimestamps(); return base.getFileTimestamps();
} }
@Override
public RawComparator<?> getCombinerKeyGroupingComparator() {
return base.getCombinerKeyGroupingComparator();
}
@Override @Override
public RawComparator<?> getGroupingComparator() { public RawComparator<?> getGroupingComparator() {
return base.getGroupingComparator(); return base.getGroupingComparator();

View File

@ -159,6 +159,11 @@ class ChainReduceContextImpl<KEYIN, VALUEIN, KEYOUT, VALUEOUT> implements
return base.getFileTimestamps(); return base.getFileTimestamps();
} }
@Override
public RawComparator<?> getCombinerKeyGroupingComparator() {
return base.getCombinerKeyGroupingComparator();
}
@Override @Override
public RawComparator<?> getGroupingComparator() { public RawComparator<?> getGroupingComparator() {
return base.getGroupingComparator(); return base.getGroupingComparator();

View File

@ -168,6 +168,11 @@ public class WrappedMapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>
return mapContext.getFileTimestamps(); return mapContext.getFileTimestamps();
} }
@Override
public RawComparator<?> getCombinerKeyGroupingComparator() {
return mapContext.getCombinerKeyGroupingComparator();
}
@Override @Override
public RawComparator<?> getGroupingComparator() { public RawComparator<?> getGroupingComparator() {
return mapContext.getGroupingComparator(); return mapContext.getGroupingComparator();

View File

@ -161,6 +161,11 @@ public class WrappedReducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>
return reduceContext.getFileTimestamps(); return reduceContext.getFileTimestamps();
} }
@Override
public RawComparator<?> getCombinerKeyGroupingComparator() {
return reduceContext.getCombinerKeyGroupingComparator();
}
@Override @Override
public RawComparator<?> getGroupingComparator() { public RawComparator<?> getGroupingComparator() {
return reduceContext.getGroupingComparator(); return reduceContext.getGroupingComparator();

View File

@ -252,6 +252,17 @@ public class JobContextImpl implements JobContext {
return conf.getJar(); return conf.getJar();
} }
/**
* Get the user defined {@link RawComparator} comparator for
* grouping keys of inputs to the combiner.
*
* @return comparator set by the user for grouping values.
* @see Job#setCombinerKeyGroupingComparatorClass(Class) for details.
*/
public RawComparator<?> getCombinerKeyGroupingComparator() {
return conf.getCombinerKeyGroupingComparator();
}
/** /**
* Get the user defined {@link RawComparator} comparator for * Get the user defined {@link RawComparator} comparator for
* grouping keys of inputs to the reduce. * grouping keys of inputs to the reduce.

View File

@ -582,7 +582,7 @@ public class MergeManagerImpl<K, V> implements MergeManager<K, V> {
Class<K> keyClass = (Class<K>) job.getMapOutputKeyClass(); Class<K> keyClass = (Class<K>) job.getMapOutputKeyClass();
Class<V> valClass = (Class<V>) job.getMapOutputValueClass(); Class<V> valClass = (Class<V>) job.getMapOutputValueClass();
RawComparator<K> comparator = RawComparator<K> comparator =
(RawComparator<K>)job.getOutputKeyComparator(); (RawComparator<K>)job.getCombinerKeyGroupingComparator();
try { try {
CombineValuesIterator values = new CombineValuesIterator( CombineValuesIterator values = new CombineValuesIterator(
kvIter, comparator, keyClass, valClass, job, Reporter.NULL, kvIter, comparator, keyClass, valClass, job, Reporter.NULL,

View File

@ -0,0 +1,191 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import junit.framework.Assert;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.Text;
import org.junit.Test;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import java.util.UUID;
public class TestOldCombinerGrouping {
private static String TEST_ROOT_DIR =
new File("build", UUID.randomUUID().toString()).getAbsolutePath();
public static class Map implements
Mapper<LongWritable, Text, Text, LongWritable> {
@Override
public void map(LongWritable key, Text value,
OutputCollector<Text, LongWritable> output, Reporter reporter)
throws IOException {
String v = value.toString();
String k = v.substring(0, v.indexOf(","));
v = v.substring(v.indexOf(",") + 1);
output.collect(new Text(k), new LongWritable(Long.parseLong(v)));
}
@Override
public void close() throws IOException {
}
@Override
public void configure(JobConf job) {
}
}
public static class Reduce implements
Reducer<Text, LongWritable, Text, LongWritable> {
@Override
public void reduce(Text key, Iterator<LongWritable> values,
OutputCollector<Text, LongWritable> output, Reporter reporter)
throws IOException {
LongWritable maxValue = null;
while (values.hasNext()) {
LongWritable value = values.next();
if (maxValue == null) {
maxValue = value;
} else if (value.compareTo(maxValue) > 0) {
maxValue = value;
}
}
output.collect(key, maxValue);
}
@Override
public void close() throws IOException {
}
@Override
public void configure(JobConf job) {
}
}
public static class Combiner extends Reduce {
}
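// Groups keys on the prefix before '|' ("A|a" and "A|b" compare equal), so
// the max-value Reduce/Combiner above emits one record per prefix.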
public static class GroupComparator implements RawComparator<Text> {
@Override
public int compare(byte[] bytes, int i, int i2, byte[] bytes2, int i3,
int i4) {
byte[] b1 = new byte[i2];
System.arraycopy(bytes, i, b1, 0, i2);
byte[] b2 = new byte[i4];
System.arraycopy(bytes2, i3, b2, 0, i4);
return compare(new Text(new String(b1)), new Text(new String(b2)));
}
@Override
public int compare(Text o1, Text o2) {
String s1 = o1.toString();
String s2 = o2.toString();
s1 = s1.substring(0, s1.indexOf("|"));
s2 = s2.substring(0, s2.indexOf("|"));
return s1.compareTo(s2);
}
}
@Test
public void testCombiner() throws Exception {
if (!new File(TEST_ROOT_DIR).mkdirs()) {
throw new RuntimeException("Could not create test dir: " + TEST_ROOT_DIR);
}
File in = new File(TEST_ROOT_DIR, "input");
if (!in.mkdirs()) {
throw new RuntimeException("Could not create test dir: " + in);
}
File out = new File(TEST_ROOT_DIR, "output");
PrintWriter pw = new PrintWriter(new FileWriter(new File(in, "data.txt")));
pw.println("A|a,1");
pw.println("A|b,2");
pw.println("B|a,3");
pw.println("B|b,4");
pw.println("B|c,5");
pw.close();
JobConf job = new JobConf();
job.set("mapreduce.framework.name", "local");
TextInputFormat.setInputPaths(job, new Path(in.getPath()));
TextOutputFormat.setOutputPath(job, new Path(out.getPath()));
job.setMapperClass(Map.class);
job.setReducerClass(Reduce.class);
job.setInputFormat(TextInputFormat.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(LongWritable.class);
job.setOutputFormat(TextOutputFormat.class);
job.setOutputValueGroupingComparator(GroupComparator.class);
job.setCombinerClass(Combiner.class);
job.setCombinerKeyGroupingComparator(GroupComparator.class);
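// No minimum-spill threshold: run the combiner even for a single spill.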
job.setInt("min.num.spills.for.combine", 0);
JobClient client = new JobClient(job);
RunningJob runningJob = client.submitJob(job);
runningJob.waitForCompletion();
if (runningJob.isSuccessful()) {
Counters counters = runningJob.getCounters();
long combinerInputRecords = counters.getGroup(
"org.apache.hadoop.mapreduce.TaskCounter").
getCounter("COMBINE_INPUT_RECORDS");
long combinerOutputRecords = counters.getGroup(
"org.apache.hadoop.mapreduce.TaskCounter").
getCounter("COMBINE_OUTPUT_RECORDS");
Assert.assertTrue(combinerInputRecords > 0);
Assert.assertTrue(combinerInputRecords > combinerOutputRecords);
BufferedReader br = new BufferedReader(new FileReader(
new File(out, "part-00000")));
Set<String> output = new HashSet<String>();
String line = br.readLine();
Assert.assertNotNull(line);
output.add(line.substring(0, 1) + line.substring(4, 5));
line = br.readLine();
Assert.assertNotNull(line);
output.add(line.substring(0, 1) + line.substring(4, 5));
line = br.readLine();
Assert.assertNull(line);
br.close();
Set<String> expected = new HashSet<String>();
expected.add("A2");
expected.add("B5");
Assert.assertEquals(expected, output);
} else {
Assert.fail("Job failed");
}
}
}

View File

@ -0,0 +1,178 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import junit.framework.Assert;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.junit.Test;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.HashSet;
import java.util.Set;
import java.util.UUID;
public class TestNewCombinerGrouping {
private static String TEST_ROOT_DIR =
new File("build", UUID.randomUUID().toString()).getAbsolutePath();
public static class Map extends
Mapper<LongWritable, Text, Text, LongWritable> {
@Override
protected void map(LongWritable key, Text value,
Context context)
throws IOException, InterruptedException {
String v = value.toString();
String k = v.substring(0, v.indexOf(","));
v = v.substring(v.indexOf(",") + 1);
context.write(new Text(k), new LongWritable(Long.parseLong(v)));
}
}
public static class Reduce extends
Reducer<Text, LongWritable, Text, LongWritable> {
@Override
protected void reduce(Text key, Iterable<LongWritable> values,
Context context)
throws IOException, InterruptedException {
LongWritable maxValue = null;
for (LongWritable value : values) {
if (maxValue == null) {
maxValue = value;
} else if (value.compareTo(maxValue) > 0) {
maxValue = value;
}
}
context.write(key, maxValue);
}
}
public static class Combiner extends Reduce {
}
public static class GroupComparator implements RawComparator<Text> {
@Override
public int compare(byte[] bytes, int i, int i2, byte[] bytes2, int i3,
int i4) {
byte[] b1 = new byte[i2];
System.arraycopy(bytes, i, b1, 0, i2);
byte[] b2 = new byte[i4];
System.arraycopy(bytes2, i3, b2, 0, i4);
return compare(new Text(new String(b1)), new Text(new String(b2)));
}
@Override
public int compare(Text o1, Text o2) {
String s1 = o1.toString();
String s2 = o2.toString();
s1 = s1.substring(0, s1.indexOf("|"));
s2 = s2.substring(0, s2.indexOf("|"));
return s1.compareTo(s2);
}
}
@Test
public void testCombiner() throws Exception {
if (!new File(TEST_ROOT_DIR).mkdirs()) {
throw new RuntimeException("Could not create test dir: " + TEST_ROOT_DIR);
}
File in = new File(TEST_ROOT_DIR, "input");
if (!in.mkdirs()) {
throw new RuntimeException("Could not create test dir: " + in);
}
File out = new File(TEST_ROOT_DIR, "output");
PrintWriter pw = new PrintWriter(new FileWriter(new File(in, "data.txt")));
pw.println("A|a,1");
pw.println("A|b,2");
pw.println("B|a,3");
pw.println("B|b,4");
pw.println("B|c,5");
pw.close();
JobConf conf = new JobConf();
conf.set("mapreduce.framework.name", "local");
Job job = new Job(conf);
TextInputFormat.setInputPaths(job, new Path(in.getPath()));
TextOutputFormat.setOutputPath(job, new Path(out.getPath()));
job.setMapperClass(Map.class);
job.setReducerClass(Reduce.class);
job.setInputFormatClass(TextInputFormat.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(LongWritable.class);
job.setOutputFormatClass(TextOutputFormat.class);
job.setGroupingComparatorClass(GroupComparator.class);
job.setCombinerKeyGroupingComparatorClass(GroupComparator.class);
job.setCombinerClass(Combiner.class);
job.getConfiguration().setInt("min.num.spills.for.combine", 0);
job.submit();
job.waitForCompletion(false);
if (job.isSuccessful()) {
Counters counters = job.getCounters();
long combinerInputRecords = counters.findCounter(
"org.apache.hadoop.mapreduce.TaskCounter",
"COMBINE_INPUT_RECORDS").getValue();
long combinerOutputRecords = counters.findCounter(
"org.apache.hadoop.mapreduce.TaskCounter",
"COMBINE_OUTPUT_RECORDS").getValue();
Assert.assertTrue(combinerInputRecords > 0);
Assert.assertTrue(combinerInputRecords > combinerOutputRecords);
BufferedReader br = new BufferedReader(new FileReader(
new File(out, "part-r-00000")));
Set<String> output = new HashSet<String>();
String line = br.readLine();
Assert.assertNotNull(line);
output.add(line.substring(0, 1) + line.substring(4, 5));
line = br.readLine();
Assert.assertNotNull(line);
output.add(line.substring(0, 1) + line.substring(4, 5));
line = br.readLine();
Assert.assertNull(line);
br.close();
Set<String> expected = new HashSet<String>();
expected.add("A2");
expected.add("B5");
Assert.assertEquals(expected, output);
} else {
Assert.fail("Job failed");
}
}
}

View File

@ -55,6 +55,9 @@ Release 2.4.0 - UNRELEASED
YARN-1028. Added FailoverProxyProvider capability to ResourceManager to help YARN-1028. Added FailoverProxyProvider capability to ResourceManager to help
with RM failover. (Karthik Kambatla via vinodkv) with RM failover. (Karthik Kambatla via vinodkv)
YARN-1029. Added embedded leader election in the ResourceManager. (Karthik
Kambatla via vinodkv)
IMPROVEMENTS IMPROVEMENTS
YARN-7. Support CPU resource for DistributedShell. (Junping Du via llu) YARN-7. Support CPU resource for DistributedShell. (Junping Du via llu)
@ -197,6 +200,11 @@ Release 2.4.0 - UNRELEASED
YARN-1493. Changed ResourceManager and Scheduler interfacing to recognize YARN-1493. Changed ResourceManager and Scheduler interfacing to recognize
app-attempts separately from apps. (Jian He via vinodkv) app-attempts separately from apps. (Jian He via vinodkv)
YARN-1482. Modified WebApplicationProxy to make it work across ResourceManager
fail-over. (Xuan Gong via vinodkv)
YARN-1568. Rename clusterid to clusterId in ActiveRMInfoProto (kasha)
OPTIMIZATIONS OPTIMIZATIONS
BUG FIXES BUG FIXES
@ -286,6 +294,18 @@ Release 2.4.0 - UNRELEASED
YARN-1549. Fixed a bug in ResourceManager's ApplicationMasterService that YARN-1549. Fixed a bug in ResourceManager's ApplicationMasterService that
was causing unmanaged AMs to not finish correctly. (haosdent via vinodkv) was causing unmanaged AMs to not finish correctly. (haosdent via vinodkv)
YARN-1559. Race between ServerRMProxy and ClientRMProxy setting
RMProxy#INSTANCE. (kasha and vinodkv via kasha)
YARN-1560. Fixed TestYarnClient#testAMMRTokens failure with null AMRM token.
(Ted Yu via jianhe)
YARN-1409. NonAggregatingLogHandler can throw RejectedExecutionException
(Tsuyoshi OZAWA via jlowe)
YARN-1293. Fixed TestContainerLaunch#testInvalidEnvSyntaxDiagnostics failure
caused by non-English system locale. (Tsuyoshi OZAWA via jianhe)
Release 2.3.0 - UNRELEASED Release 2.3.0 - UNRELEASED
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES

View File

@ -309,13 +309,4 @@
<Class name="org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore" /> <Class name="org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore" />
<Bug pattern="IS2_INCONSISTENT_SYNC" /> <Bug pattern="IS2_INCONSISTENT_SYNC" />
</Match> </Match>
<!-- Ignore INSTANCE not being final as it is created in sub-classes -->
<Match>
<Class name="org.apache.hadoop.yarn.client.RMProxy" />
<Field name="INSTANCE" />
<Bug pattern="MS_SHOULD_BE_FINAL"/>
</Match>
</FindBugsFilter> </FindBugsFilter>

View File

@ -51,6 +51,22 @@ public class HAUtil {
YarnConfiguration.DEFAULT_RM_HA_ENABLED); YarnConfiguration.DEFAULT_RM_HA_ENABLED);
} }
public static boolean isAutomaticFailoverEnabled(Configuration conf) {
return conf.getBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,
YarnConfiguration.DEFAULT_AUTO_FAILOVER_ENABLED);
}
public static boolean isAutomaticFailoverEnabledAndEmbedded(
Configuration conf) {
return isAutomaticFailoverEnabled(conf) &&
isAutomaticFailoverEmbedded(conf);
}
public static boolean isAutomaticFailoverEmbedded(Configuration conf) {
return conf.getBoolean(YarnConfiguration.AUTO_FAILOVER_EMBEDDED,
YarnConfiguration.DEFAULT_AUTO_FAILOVER_EMBEDDED);
}
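A sketch of the intended call site (hypothetical guard; per the YARN-1029
entry above, the RM starts an embedded elector only when both switches are on):

    if (HAUtil.isAutomaticFailoverEnabledAndEmbedded(conf)) {
      // create and start the embedded leader-elector service
    }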
/** /**
* Verify configuration for Resource Manager HA. * Verify configuration for Resource Manager HA.
* @param conf Configuration * @param conf Configuration
@ -162,8 +178,7 @@ public class HAUtil {
* @param conf Configuration. Please use verifyAndSetRMHAId to check. * @param conf Configuration. Please use verifyAndSetRMHAId to check.
* @return RM Id on success * @return RM Id on success
*/ */
@VisibleForTesting public static String getRMHAId(Configuration conf) {
static String getRMHAId(Configuration conf) {
return conf.get(YarnConfiguration.RM_HA_ID); return conf.get(YarnConfiguration.RM_HA_ID);
} }

View File

@ -59,7 +59,7 @@ public class YarnConfiguration extends Configuration {
public static final String IPC_PREFIX = YARN_PREFIX + "ipc."; public static final String IPC_PREFIX = YARN_PREFIX + "ipc.";
/** Factory to create client IPC classes.*/ /** Factory to create client IPC classes.*/
public static final String IPC_CLIENT_FACTORY_CLASS = public static final String IPC_CLIENT_FACTORY_CLASS =
IPC_PREFIX + "client.factory.class"; IPC_PREFIX + "client.factory.class";
public static final String DEFAULT_IPC_CLIENT_FACTORY_CLASS = public static final String DEFAULT_IPC_CLIENT_FACTORY_CLASS =
"org.apache.hadoop.yarn.factories.impl.pb.RpcClientFactoryPBImpl"; "org.apache.hadoop.yarn.factories.impl.pb.RpcClientFactoryPBImpl";
@ -87,6 +87,8 @@ public class YarnConfiguration extends Configuration {
//////////////////////////////// ////////////////////////////////
public static final String RM_PREFIX = "yarn.resourcemanager."; public static final String RM_PREFIX = "yarn.resourcemanager.";
public static final String RM_CLUSTER_ID = RM_PREFIX + "cluster-id";
/** The address of the applications manager interface in the RM.*/ /** The address of the applications manager interface in the RM.*/
public static final String RM_ADDRESS = public static final String RM_ADDRESS =
RM_PREFIX + "address"; RM_PREFIX + "address";
@ -278,6 +280,36 @@ public class YarnConfiguration extends Configuration {
public static final String RECOVERY_ENABLED = RM_PREFIX + "recovery.enabled"; public static final String RECOVERY_ENABLED = RM_PREFIX + "recovery.enabled";
public static final boolean DEFAULT_RM_RECOVERY_ENABLED = false; public static final boolean DEFAULT_RM_RECOVERY_ENABLED = false;
/** Zookeeper interaction configs */
public static final String RM_ZK_PREFIX = RM_PREFIX + "zk-";
public static final String RM_ZK_ADDRESS = RM_ZK_PREFIX + "address";
public static final String RM_ZK_NUM_RETRIES = RM_ZK_PREFIX + "num-retries";
public static final int DEFAULT_ZK_RM_NUM_RETRIES = 500;
public static final String RM_ZK_RETRY_INTERVAL_MS =
RM_ZK_PREFIX + "retry-interval-ms";
public static final long DEFAULT_RM_ZK_RETRY_INTERVAL_MS = 2000;
public static final String RM_ZK_TIMEOUT_MS = RM_ZK_PREFIX + "timeout-ms";
public static final int DEFAULT_RM_ZK_TIMEOUT_MS = 10000;
public static final String RM_ZK_ACL = RM_ZK_PREFIX + "acl";
public static final String DEFAULT_RM_ZK_ACL = "world:anyone:rwcda";
public static final String ZK_STATE_STORE_PREFIX =
RM_PREFIX + "zk-state-store.";
/** Parent znode path under which ZKRMStateStore will create znodes */
public static final String ZK_RM_STATE_STORE_PARENT_PATH =
ZK_STATE_STORE_PREFIX + "parent-path";
public static final String DEFAULT_ZK_RM_STATE_STORE_PARENT_PATH = "/rmstore";
/** Root node ACLs for fencing */
public static final String ZK_RM_STATE_STORE_ROOT_NODE_ACL =
ZK_STATE_STORE_PREFIX + "root-node.acl";
/** HA related configs */ /** HA related configs */
public static final String RM_HA_PREFIX = RM_PREFIX + "ha."; public static final String RM_HA_PREFIX = RM_PREFIX + "ha.";
public static final String RM_HA_ENABLED = RM_HA_PREFIX + "enabled"; public static final String RM_HA_ENABLED = RM_HA_PREFIX + "enabled";
@ -296,6 +328,22 @@ public class YarnConfiguration extends Configuration {
HttpConfig.isSecure() ? RM_WEBAPP_HTTPS_ADDRESS HttpConfig.isSecure() ? RM_WEBAPP_HTTPS_ADDRESS
: RM_WEBAPP_ADDRESS)); : RM_WEBAPP_ADDRESS));
public static final String AUTO_FAILOVER_PREFIX =
RM_HA_PREFIX + "automatic-failover.";
public static final String AUTO_FAILOVER_ENABLED =
AUTO_FAILOVER_PREFIX + "enabled";
public static final boolean DEFAULT_AUTO_FAILOVER_ENABLED = false;
public static final String AUTO_FAILOVER_EMBEDDED =
AUTO_FAILOVER_PREFIX + "embedded";
public static final boolean DEFAULT_AUTO_FAILOVER_EMBEDDED = false;
public static final String AUTO_FAILOVER_ZK_BASE_PATH =
AUTO_FAILOVER_PREFIX + "zk-base-path";
public static final String DEFAULT_AUTO_FAILOVER_ZK_BASE_PATH =
"/yarn-leader-election";
public static final String CLIENT_FAILOVER_PREFIX = public static final String CLIENT_FAILOVER_PREFIX =
YARN_PREFIX + "client.failover-"; YARN_PREFIX + "client.failover-";
public static final String CLIENT_FAILOVER_PROXY_PROVIDER = public static final String CLIENT_FAILOVER_PROXY_PROVIDER =
@ -334,36 +382,6 @@ public class YarnConfiguration extends Configuration {
+ "fs.state-store.retry-policy-spec"; + "fs.state-store.retry-policy-spec";
public static final String DEFAULT_FS_RM_STATE_STORE_RETRY_POLICY_SPEC = public static final String DEFAULT_FS_RM_STATE_STORE_RETRY_POLICY_SPEC =
"2000, 500"; "2000, 500";
/**
* Comma separated host:port pairs, each corresponding to a ZK server for
* ZKRMStateStore
*/
public static final String ZK_STATE_STORE_PREFIX =
RM_PREFIX + "zk-state-store.";
public static final String ZK_RM_STATE_STORE_NUM_RETRIES =
ZK_STATE_STORE_PREFIX + "num-retries";
public static final int DEFAULT_ZK_RM_STATE_STORE_NUM_RETRIES = 500;
/** retry interval when connecting to zookeeper*/
public static final String ZK_RM_STATE_STORE_RETRY_INTERVAL_MS =
ZK_STATE_STORE_PREFIX + "retry-interval-ms";
public static final long DEFAULT_ZK_RM_STATE_STORE_RETRY_INTERVAL_MS = 2000;
public static final String ZK_RM_STATE_STORE_ADDRESS =
ZK_STATE_STORE_PREFIX + "address";
/** Timeout in millisec for ZK server connection for ZKRMStateStore */
public static final String ZK_RM_STATE_STORE_TIMEOUT_MS =
ZK_STATE_STORE_PREFIX + "timeout-ms";
public static final int DEFAULT_ZK_RM_STATE_STORE_TIMEOUT_MS = 60000;
/** Parent znode path under which ZKRMStateStore will create znodes */
public static final String ZK_RM_STATE_STORE_PARENT_PATH =
ZK_STATE_STORE_PREFIX + "parent-path";
public static final String DEFAULT_ZK_RM_STATE_STORE_PARENT_PATH = "/rmstore";
/** ACL for znodes in ZKRMStateStore */
public static final String ZK_RM_STATE_STORE_ACL =
ZK_STATE_STORE_PREFIX + "acl";
public static final String DEFAULT_ZK_RM_STATE_STORE_ACL =
"world:anyone:rwcda";
public static final String ZK_RM_STATE_STORE_ROOT_NODE_ACL =
ZK_STATE_STORE_PREFIX + "root-node.acl";
/** The maximum number of completed applications RM keeps. */ /** The maximum number of completed applications RM keeps. */
public static final String RM_MAX_COMPLETED_APPLICATIONS = public static final String RM_MAX_COMPLETED_APPLICATIONS =

View File

@ -133,3 +133,11 @@ message RMStateVersionProto {
optional int32 major_version = 1; optional int32 major_version = 1;
optional int32 minor_version = 2; optional int32 minor_version = 2;
} }
//////////////////////////////////////////////////////////////////
///////////// RM Failover related records ////////////////////////
//////////////////////////////////////////////////////////////////
message ActiveRMInfoProto {
required string clusterId = 1;
required string rmId = 2;
}
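With the generated Java bindings (builder names follow protoc defaults; a
sketch, not the RM's actual election code):

    ActiveRMInfoProto info = ActiveRMInfoProto.newBuilder()
        .setClusterId("yarn-cluster-1")  // made-up cluster id
        .setRmId("rm1")                  // made-up RM id
        .build();
    byte[] payload = info.toByteArray(); // plausibly published on the leader znode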

View File

@ -30,7 +30,13 @@
</properties> </properties>
<dependencies> <dependencies>
<dependency> <dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-api</artifactId> <artifactId>hadoop-yarn-api</artifactId>
</dependency> </dependency>

Some files were not shown because too many files have changed in this diff