From 470b1197be996106fa166da33fb1d56276b658b4 Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Thu, 3 May 2012 03:00:10 +0000 Subject: [PATCH 01/70] HDFS-3351. NameNode#initializeGenericKeys should always set fs.defaultFS regardless of whether HA or Federation is enabled. Contributed by Aaron T. Myers. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1333299 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../hadoop/hdfs/server/namenode/NameNode.java | 24 +++++++++---------- .../org/apache/hadoop/hdfs/TestDFSUtil.java | 19 +++++++++++++++ 3 files changed, 33 insertions(+), 13 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 1ee488d6df6..18b93789805 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -589,6 +589,9 @@ Release 2.0.0 - UNRELEASED HDFS-3330. If GetImageServlet throws an Error or RTE, response should not have HTTP "OK" status. (todd) + HDFS-3351. NameNode#initializeGenericKeys should always set fs.defaultFS + regardless of whether HA or Federation is enabled. (atm) + BREAKDOWN OF HDFS-1623 SUBTASKS HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 48a056507c4..05bc8c2ec67 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -1130,20 +1130,18 @@ public class NameNode { */ public static void initializeGenericKeys(Configuration conf, String nameserviceId, String namenodeId) { - if ((nameserviceId == null || nameserviceId.isEmpty()) && - (namenodeId == null || namenodeId.isEmpty())) { - return; + if ((nameserviceId != null && !nameserviceId.isEmpty()) || + (namenodeId != null && !namenodeId.isEmpty())) { + if (nameserviceId != null) { + conf.set(DFS_FEDERATION_NAMESERVICE_ID, nameserviceId); + } + if (namenodeId != null) { + conf.set(DFS_HA_NAMENODE_ID_KEY, namenodeId); + } + + DFSUtil.setGenericConf(conf, nameserviceId, namenodeId, + NAMESERVICE_SPECIFIC_KEYS); } - - if (nameserviceId != null) { - conf.set(DFS_FEDERATION_NAMESERVICE_ID, nameserviceId); - } - if (namenodeId != null) { - conf.set(DFS_HA_NAMENODE_ID_KEY, namenodeId); - } - - DFSUtil.setGenericConf(conf, nameserviceId, namenodeId, - NAMESERVICE_SPECIFIC_KEYS); if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) { URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://" + conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java index 3f1421e872f..a991d37c8a3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java @@ -319,6 +319,25 @@ public class TestDFSUtil { } } + /** + * Ensure that fs.defaultFS is set in the configuration even if neither HA nor + * Federation is enabled. + * + * Regression test for HDFS-3351. 
+ */ + @Test + public void testConfModificationNoFederationOrHa() { + final HdfsConfiguration conf = new HdfsConfiguration(); + String nsId = null; + String nnId = null; + + conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1234"); + + assertFalse("hdfs://localhost:1234".equals(conf.get(FS_DEFAULT_NAME_KEY))); + NameNode.initializeGenericKeys(conf, nsId, nnId); + assertEquals("hdfs://localhost:1234", conf.get(FS_DEFAULT_NAME_KEY)); + } + /** * Regression test for HDFS-2934. */ From d94899877709836153244c2f0c5f14c37c9d4795 Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Thu, 3 May 2012 06:35:45 +0000 Subject: [PATCH 02/70] HADOOP-8347. Hadoop Common logs misspell 'successful'. Contributed by Philip Zeyliger git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1333321 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../src/main/java/org/apache/hadoop/ipc/Server.java | 4 ++-- .../security/authorize/ServiceAuthorizationManager.java | 4 ++-- .../hadoop/security/JniBasedUnixGroupsNetgroupMapping.c | 2 +- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index cd7539fb566..f8435a41298 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -284,6 +284,9 @@ Release 2.0.0 - UNRELEASED HADOOP-8214. make hadoop script recognize a full set of deprecated commands (rvs via tucu) + HADOOP-8347. Hadoop Common logs misspell 'successful'. + (Philip Zeyliger via eli) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index 69900421fa2..6369f3aa7d8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -217,7 +217,7 @@ public abstract class Server { public static final Log AUDITLOG = LogFactory.getLog("SecurityLogger."+Server.class.getName()); private static final String AUTH_FAILED_FOR = "Auth failed for "; - private static final String AUTH_SUCCESSFULL_FOR = "Auth successfull for "; + private static final String AUTH_SUCCESSFUL_FOR = "Auth successful for "; private static final ThreadLocal SERVER = new ThreadLocal(); @@ -1234,7 +1234,7 @@ public abstract class Server { LOG.debug("SASL server successfully authenticated client: " + user); } rpcMetrics.incrAuthenticationSuccesses(); - AUDITLOG.info(AUTH_SUCCESSFULL_FOR + user); + AUDITLOG.info(AUTH_SUCCESSFUL_FOR + user); saslContextEstablished = true; } } else { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java index 3a9ad0b5f5b..d17d065bf81 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java @@ -59,7 +59,7 @@ public class ServiceAuthorizationManager { public static final Log AUDITLOG = LogFactory.getLog("SecurityLogger."+ServiceAuthorizationManager.class.getName()); - private static final String AUTHZ_SUCCESSFULL_FOR = "Authorization 
successfull for "; + private static final String AUTHZ_SUCCESSFUL_FOR = "Authorization successful for "; private static final String AUTHZ_FAILED_FOR = "Authorization failed for "; @@ -108,7 +108,7 @@ public class ServiceAuthorizationManager { " is not authorized for protocol " + protocol + ", expected client Kerberos principal is " + clientPrincipal); } - AUDITLOG.info(AUTHZ_SUCCESSFULL_FOR + user + " for protocol="+protocol); + AUDITLOG.info(AUTHZ_SUCCESSFUL_FOR + user + " for protocol="+protocol); } public synchronized void refresh(Configuration conf, diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c index a6ddcc30358..6a92bb2b92a 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c @@ -70,7 +70,7 @@ Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet // set the name of the group for subsequent calls to getnetgrent // note that we want to end group lokup regardless whether setnetgrent - // was successfull or not (as long as it was called we need to call + // was successful or not (as long as it was called we need to call // endnetgrent) setnetgrentCalledFlag = 1; if(setnetgrent(cgroup) == 1) { From e3242b95b35844a0877a83032d3a7e3d5e9bd9c2 Mon Sep 17 00:00:00 2001 From: Devaraj Das Date: Thu, 3 May 2012 17:16:44 +0000 Subject: [PATCH 03/70] HADOOP-8346. Makes oid changes to make SPNEGO work. Was broken due to fixes introduced by the IBM JDK compatibility patch. Contributed by Devaraj Das. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1333557 13f79535-47bb-0310-9956-ffa450edef68 --- .../authentication/client/KerberosAuthenticator.java | 8 +++----- .../hadoop/security/authentication/util/KerberosUtil.java | 6 ++---- .../server/TestKerberosAuthenticationHandler.java | 6 +++--- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ 4 files changed, 11 insertions(+), 12 deletions(-) diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java index 48b6cbec6e3..4227d084385 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java @@ -26,7 +26,6 @@ import javax.security.auth.login.Configuration; import javax.security.auth.login.LoginContext; import javax.security.auth.login.LoginException; import java.io.IOException; -import java.lang.reflect.Field; import java.net.HttpURLConnection; import java.net.URL; import java.security.AccessControlContext; @@ -196,11 +195,10 @@ public class KerberosAuthenticator implements Authenticator { try { GSSManager gssManager = GSSManager.getInstance(); String servicePrincipal = "HTTP/" + KerberosAuthenticator.this.url.getHost(); - + Oid oid = KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL"); GSSName serviceName = gssManager.createName(servicePrincipal, - GSSName.NT_HOSTBASED_SERVICE); - Oid oid = KerberosUtil.getOidClassInstance(servicePrincipal, - gssManager); + oid); + oid = KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID"); gssContext = gssManager.createContext(serviceName, oid, null, GSSContext.DEFAULT_LIFETIME); gssContext.requestCredDeleg(true); diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java index df8319c6643..5688e600f77 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java @@ -22,7 +22,6 @@ import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import org.ietf.jgss.GSSException; -import org.ietf.jgss.GSSManager; import org.ietf.jgss.Oid; public class KerberosUtil { @@ -34,8 +33,7 @@ public class KerberosUtil { : "com.sun.security.auth.module.Krb5LoginModule"; } - public static Oid getOidClassInstance(String servicePrincipal, - GSSManager gssManager) + public static Oid getOidInstance(String oidName) throws ClassNotFoundException, GSSException, NoSuchFieldException, IllegalAccessException { Class oidClass; @@ -44,7 +42,7 @@ public class KerberosUtil { } else { oidClass = Class.forName("sun.security.jgss.GSSUtil"); } - Field oidField = oidClass.getDeclaredField("GSS_KRB5_MECH_OID"); + Field oidField = oidClass.getDeclaredField(oidName); return (Oid)oidField.get(oidClass); } diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java index e6e7c9cca00..692ceab92da 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java @@ -145,10 +145,10 @@ public class TestKerberosAuthenticationHandler extends TestCase { GSSContext gssContext = null; try { String servicePrincipal = KerberosTestUtils.getServerPrincipal(); + Oid oid = KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL"); GSSName serviceName = gssManager.createName(servicePrincipal, - GSSName.NT_HOSTBASED_SERVICE); - Oid oid = KerberosUtil.getOidClassInstance(servicePrincipal, - gssManager); + oid); + oid = KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID"); gssContext = gssManager.createContext(serviceName, oid, null, GSSContext.DEFAULT_LIFETIME); gssContext.requestCredDeleg(true); diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index f8435a41298..d0d134bb306 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -409,6 +409,9 @@ Release 2.0.0 - UNRELEASED HADOOP-8342. HDFS command fails with exception following merge of HADOOP-8325 (tucu) + HADOOP-8346. Makes oid changes to make SPNEGO work. Was broken due + to fixes introduced by the IBM JDK compatibility patch. (ddas) + BREAKDOWN OF HADOOP-7454 SUBTASKS HADOOP-7455. HA: Introduce HA Service Protocol Interface. (suresh) From 48414b08277b86cdbc34ae36d7c4d204fd838294 Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Thu, 3 May 2012 18:35:21 +0000 Subject: [PATCH 04/70] MAPREDUCE-4163. consistently set the bind address (Daryn Sharp via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1333579 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 2 + .../mapred/TaskAttemptListenerImpl.java | 6 +-- .../v2/app/client/MRClientService.java | 15 +----- .../app/launcher/TestContainerLauncher.java | 2 +- .../mapreduce/v2/hs/HistoryClientService.java | 47 ++++++------------- .../hadoop/mapred/TestClientRedirect.java | 4 +- .../hadoop/yarn/TestContainerLaunchRPC.java | 2 +- .../java/org/apache/hadoop/yarn/TestRPC.java | 5 +- .../ContainerManagerImpl.java | 16 ++----- .../ResourceLocalizationService.java | 12 ++--- .../nodemanager/TestNodeStatusUpdater.java | 21 ++++----- .../TestResourceLocalizationService.java | 16 +++++-- .../server/resourcemanager/AdminService.java | 7 +-- .../ApplicationMasterService.java | 10 +--- .../resourcemanager/ClientRMService.java | 7 +-- .../ResourceTrackerService.java | 7 +-- .../server/TestContainerManagerSecurity.java | 3 +- 17 files changed, 61 insertions(+), 121 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 5d8a5404e78..2cd5a224c62 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -447,6 +447,8 @@ Release 0.23.3 - UNRELEASED MAPREDUCE-4211. Error conditions (missing appid, appid not found) are masked in the RM app page (Jonathan Eagles via bobby) + MAPREDUCE-4163. 
consistently set the bind address (Daryn Sharp via bobby) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java index 164f406017e..fdcec65a90a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java @@ -19,7 +19,6 @@ package org.apache.hadoop.mapred; import java.io.IOException; -import java.net.InetAddress; import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Collections; @@ -127,10 +126,7 @@ public class TaskAttemptListenerImpl extends CompositeService } server.start(); - InetSocketAddress listenerAddress = server.getListenerAddress(); - listenerAddress.getAddress(); - this.address = NetUtils.createSocketAddr(InetAddress.getLocalHost() - .getCanonicalHostName() + ":" + listenerAddress.getPort()); + this.address = NetUtils.getConnectAddress(server); } catch (IOException e) { throw new YarnException(e); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java index 60b29e831df..0bd730b7dd0 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java @@ -18,9 +18,7 @@ package org.apache.hadoop.mapreduce.v2.app.client; -import java.net.InetAddress; import java.net.InetSocketAddress; -import java.net.UnknownHostException; import java.util.Arrays; import java.util.Collection; @@ -78,7 +76,6 @@ import org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.PolicyProvider; -import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; import org.apache.hadoop.yarn.factories.RecordFactory; @@ -116,13 +113,7 @@ public class MRClientService extends AbstractService public void start() { Configuration conf = getConfig(); YarnRPC rpc = YarnRPC.create(conf); - InetSocketAddress address = NetUtils.createSocketAddr("0.0.0.0:0"); - InetAddress hostNameResolved = null; - try { - hostNameResolved = InetAddress.getLocalHost(); - } catch (UnknownHostException e) { - throw new YarnException(e); - } + InetSocketAddress address = new InetSocketAddress(0); ClientToAMSecretManager secretManager = null; if (UserGroupInformation.isSecurityEnabled()) { @@ -150,9 +141,7 @@ public class MRClientService extends AbstractService } server.start(); - this.bindAddress = - NetUtils.createSocketAddr(hostNameResolved.getHostAddress() - + ":" + server.getPort()); + this.bindAddress = 
NetUtils.getConnectAddress(server); LOG.info("Instantiated MRClientService at " + this.bindAddress); try { webApp = WebApps.$for("mapreduce", AppContext.class, appContext, "ws").with(conf). diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java index cde1333ec8f..9ae938a8081 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java @@ -356,7 +356,7 @@ public class TestContainerLauncher { // make proxy connect to our local containerManager server ContainerManager proxy = (ContainerManager) rpc.getProxy( ContainerManager.class, - NetUtils.createSocketAddr("localhost:" + server.getPort()), conf); + NetUtils.getConnectAddress(server), conf); return proxy; } }; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java index a4017631148..b9ecb98a8a6 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java @@ -19,9 +19,7 @@ package org.apache.hadoop.mapreduce.v2.hs; import java.io.IOException; -import java.net.InetAddress; import java.net.InetSocketAddress; -import java.net.UnknownHostException; import java.security.AccessControlException; import java.security.PrivilegedExceptionAction; import java.util.Arrays; @@ -76,7 +74,6 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.api.records.DelegationToken; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; @@ -117,17 +114,10 @@ public class HistoryClientService extends AbstractService { Configuration conf = getConfig(); YarnRPC rpc = YarnRPC.create(conf); initializeWebApp(conf); - String serviceAddr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS, - JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS); - InetSocketAddress address = NetUtils.createSocketAddr(serviceAddr, - JHAdminConfig.DEFAULT_MR_HISTORY_PORT, - JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS); - InetAddress hostNameResolved = null; - try { - hostNameResolved = InetAddress.getLocalHost(); - } catch (UnknownHostException e) { - throw new YarnException(e); - } + InetSocketAddress address = conf.getSocketAddr( + JHAdminConfig.MR_HISTORY_ADDRESS, + JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS, + JHAdminConfig.DEFAULT_MR_HISTORY_PORT); server = rpc.getServer(HSClientProtocol.class, protocolHandler, address, @@ -143,31 +133,24 @@ public 
class HistoryClientService extends AbstractService { } server.start(); - this.bindAddress = - NetUtils.createSocketAddr(hostNameResolved.getHostAddress() - + ":" + server.getPort()); + this.bindAddress = conf.updateConnectAddr(JHAdminConfig.MR_HISTORY_ADDRESS, + server.getListenerAddress()); LOG.info("Instantiated MRClientService at " + this.bindAddress); - if (getConfig().getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) { - String resolvedAddress = bindAddress.getHostName() + ":" + bindAddress.getPort(); - conf.set(JHAdminConfig.MR_HISTORY_ADDRESS, resolvedAddress); - - String hostname = getConfig().get(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS, - JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS); - hostname = (hostname.contains(":")) ? hostname.substring(0, hostname.indexOf(":")) : hostname; - int port = webApp.port(); - resolvedAddress = hostname + ":" + port; - conf.set(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS, resolvedAddress); - } - super.start(); } private void initializeWebApp(Configuration conf) { webApp = new HsWebApp(history); - String bindAddress = conf.get(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS, - JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS); - WebApps.$for("jobhistory", HistoryClientService.class, this, "ws").with(conf).at(bindAddress).start(webApp); + InetSocketAddress bindAddress = conf.getSocketAddr( + JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS, + JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS, + JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_PORT); + // NOTE: there should be a .at(InetSocketAddress) + WebApps.$for("jobhistory", HistoryClientService.class, this, "ws") + .with(conf).at(NetUtils.getHostPortString(bindAddress)).start(webApp); + conf.updateConnectAddr(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS, + webApp.getListenerAddress()); } @Override diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java index b51166a11c5..9d4efe639c7 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java @@ -390,9 +390,7 @@ public class TestClientRedirect { rpc.getServer(protocol, this, address, conf, null, 1); server.start(); - this.bindAddress = - NetUtils.createSocketAddr(hostNameResolved.getHostAddress() - + ":" + server.getPort()); + this.bindAddress = NetUtils.getConnectAddress(server); super.start(); amRunning = true; } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java index 7db64ddeda5..b18588d9cbb 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java @@ -85,7 +85,7 @@ public class TestContainerLaunchRPC { ContainerManager proxy = (ContainerManager) rpc.getProxy( ContainerManager.class, - NetUtils.createSocketAddr("localhost:" + server.getPort()), conf); + 
server.getListenerAddress(), conf); ContainerLaunchContext containerLaunchContext = recordFactory .newRecordInstance(ContainerLaunchContext.class); containerLaunchContext.setUser("dummy-user"); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java index 2c127cc6a26..6975db229e5 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java @@ -75,8 +75,7 @@ public class TestRPC { // Any unrelated protocol would do ClientRMProtocol proxy = (ClientRMProtocol) rpc.getProxy( - ClientRMProtocol.class, NetUtils.createSocketAddr("localhost:" - + server.getPort()), conf); + ClientRMProtocol.class, NetUtils.getConnectAddress(server), conf); try { proxy.getNewApplication(Records @@ -109,7 +108,7 @@ public class TestRPC { RPC.setProtocolEngine(conf, ContainerManagerPB.class, ProtobufRpcEngine.class); ContainerManager proxy = (ContainerManager) rpc.getProxy(ContainerManager.class, - NetUtils.createSocketAddr("localhost:" + server.getPort()), conf); + NetUtils.getConnectAddress(server), conf); ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class); containerLaunchContext.setUser("dummy-user"); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java index bdbe8131a8a..d82186f17c3 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java @@ -23,7 +23,6 @@ import static org.apache.hadoop.yarn.service.Service.STATE.STARTED; import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; -import java.net.UnknownHostException; import java.nio.ByteBuffer; import java.util.Map; @@ -33,13 +32,13 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.io.DataInputByteBuffer; import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.api.ContainerManager; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse; @@ -244,15 +243,10 @@ public class ContainerManagerImpl extends CompositeService implements } server.start(); - try { - resolvedAddress = InetAddress.getLocalHost(); - } 
catch (UnknownHostException e) { - throw new YarnException(e); - } - this.context.getNodeId().setHost(resolvedAddress.getCanonicalHostName()); - this.context.getNodeId().setPort(server.getPort()); - LOG.info("ContainerManager started at " - + this.context.getNodeId().toString()); + InetSocketAddress connectAddress = NetUtils.getConnectAddress(server); + this.context.getNodeId().setHost(connectAddress.getHostName()); + this.context.getNodeId().setPort(connectAddress.getPort()); + LOG.info("ContainerManager started at " + connectAddress); super.start(); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java index c674e18f31d..be7d2d6b117 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java @@ -69,7 +69,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.event.Dispatcher; @@ -228,14 +227,9 @@ public class ResourceLocalizationService extends CompositeService cacheCleanupPeriod, cacheCleanupPeriod, TimeUnit.MILLISECONDS); server = createServer(); server.start(); - String host = getConfig().get(YarnConfiguration.NM_LOCALIZER_ADDRESS) - .split(":")[0]; - getConfig().set(YarnConfiguration.NM_LOCALIZER_ADDRESS, host + ":" - + server.getPort()); - localizationServerAddress = getConfig().getSocketAddr( - YarnConfiguration.NM_LOCALIZER_ADDRESS, - YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS, - YarnConfiguration.DEFAULT_NM_LOCALIZER_PORT); + localizationServerAddress = + getConfig().updateConnectAddr(YarnConfiguration.NM_LOCALIZER_ADDRESS, + server.getListenerAddress()); LOG.info("Localizer started on port " + server.getPort()); super.start(); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java index e19bc28939f..56153379deb 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java @@ -21,8 +21,6 @@ package org.apache.hadoop.yarn.server.nodemanager; import static org.mockito.Mockito.mock; import java.io.IOException; -import java.net.InetAddress; -import 
java.net.UnknownHostException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -88,7 +86,7 @@ public class TestNodeStatusUpdater { .getRecordFactory(null); int heartBeatID = 0; - volatile Error nmStartError = null; + volatile Throwable nmStartError = null; private final List registeredNodes = new ArrayList(); private final Configuration conf = new YarnConfiguration(); private NodeManager nm; @@ -118,12 +116,8 @@ public class TestNodeStatusUpdater { NodeId nodeId = request.getNodeId(); Resource resource = request.getResource(); LOG.info("Registering " + nodeId.toString()); - try { - Assert.assertEquals(InetAddress.getLocalHost().getCanonicalHostName() - + ":12345", nodeId.toString()); - } catch (UnknownHostException e) { - Assert.fail(e.getMessage()); - } + // NOTE: this really should be checking against the config value + Assert.assertEquals("localhost:12345", nodeId.toString()); Assert.assertEquals(5 * 1024, resource.getMemory()); registeredNodes.add(nodeId); RegistrationResponse regResponse = recordFactory @@ -421,8 +415,9 @@ public class TestNodeStatusUpdater { public void run() { try { nm.start(); - } catch (Error e) { + } catch (Throwable e) { TestNodeStatusUpdater.this.nmStartError = e; + throw new YarnException(e); } } }.start(); @@ -433,11 +428,11 @@ public class TestNodeStatusUpdater { int waitCount = 0; while (nm.getServiceState() == STATE.INITED && waitCount++ != 20) { LOG.info("Waiting for NM to start.."); + if (nmStartError != null) { + Assert.fail(nmStartError.getCause().getMessage()); + } Thread.sleep(1000); } - if (nmStartError != null) { - throw nmStartError; - } if (nm.getServiceState() != STATE.STARTED) { // NM could have failed. Assert.fail("NodeManager failed to start"); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java index 84cd7f22b22..76713761ad0 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java @@ -95,6 +95,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.even import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerEventType; import org.apache.hadoop.yarn.util.BuilderUtils; import org.apache.hadoop.yarn.util.ConverterUtils; +import org.junit.BeforeClass; import org.junit.Test; import org.mockito.ArgumentCaptor; import org.mockito.ArgumentMatcher; @@ -103,7 +104,14 @@ public class TestResourceLocalizationService { static final Path basedir = new Path("target", TestResourceLocalizationService.class.getName()); - + static Server mockServer; + + @BeforeClass + public static void setup() { + mockServer = mock(Server.class); + doReturn(new InetSocketAddress(123)).when(mockServer).getListenerAddress(); + } + @Test public void testLocalizationInit() throws Exception { final Configuration conf = new Configuration(); @@ 
-178,7 +186,6 @@ public class TestResourceLocalizationService { } conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs); - Server ignore = mock(Server.class); LocalizerTracker mockLocallilzerTracker = mock(LocalizerTracker.class); DrainDispatcher dispatcher = new DrainDispatcher(); dispatcher.init(conf); @@ -203,7 +210,7 @@ public class TestResourceLocalizationService { new ResourceLocalizationService(dispatcher, exec, delService, dirsHandler); ResourceLocalizationService spyService = spy(rawService); - doReturn(ignore).when(spyService).createServer(); + doReturn(mockServer).when(spyService).createServer(); doReturn(mockLocallilzerTracker).when(spyService).createLocalizerTracker( isA(Configuration.class)); doReturn(lfs).when(spyService) @@ -366,7 +373,6 @@ public class TestResourceLocalizationService { } conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs); - Server ignore = mock(Server.class); DrainDispatcher dispatcher = new DrainDispatcher(); dispatcher.init(conf); dispatcher.start(); @@ -388,7 +394,7 @@ public class TestResourceLocalizationService { new ResourceLocalizationService(dispatcher, exec, delService, dirsHandler); ResourceLocalizationService spyService = spy(rawService); - doReturn(ignore).when(spyService).createServer(); + doReturn(mockServer).when(spyService).createServer(); doReturn(lfs).when(spyService).getLocalFileContext(isA(Configuration.class)); try { spyService.init(conf); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java index 94e6d9c636e..b0a1a961665 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java @@ -120,11 +120,8 @@ public class AdminService extends AbstractService implements RMAdminProtocol { } this.server.start(); - if (getConfig().getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) { - String resolvedAddress = - this.server.getListenerAddress().getHostName() + ":" + this.server.getListenerAddress().getPort(); - conf.set(YarnConfiguration.RM_ADMIN_ADDRESS, resolvedAddress); - } + conf.updateConnectAddr(YarnConfiguration.RM_ADMIN_ADDRESS, + server.getListenerAddress()); super.start(); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java index 952788e0191..46994b054b6 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java @@ -31,7 +31,6 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.ipc.Server; -import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.util.StringUtils; @@ -120,13 +119,8 @@ public class ApplicationMasterService extends AbstractService implements this.server.start(); this.bindAddress = - NetUtils.createSocketAddr(masterServiceAddress.getHostName(), - this.server.getPort()); - if (getConfig().getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) { - String resolvedAddress = - this.server.getListenerAddress().getHostName() + ":" + this.server.getListenerAddress().getPort(); - conf.set(YarnConfiguration.RM_SCHEDULER_ADDRESS, resolvedAddress); - } + conf.updateConnectAddr(YarnConfiguration.RM_SCHEDULER_ADDRESS, + server.getListenerAddress()); super.start(); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java index 12fa6899257..8ea93ef34f0 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java @@ -150,11 +150,8 @@ public class ClientRMService extends AbstractService implements } this.server.start(); - if (getConfig().getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) { - String resolvedAddress = - this.server.getListenerAddress().getHostName() + ":" + this.server.getListenerAddress().getPort(); - conf.set(YarnConfiguration.RM_ADDRESS, resolvedAddress); - } + clientBindAddress = conf.updateConnectAddr(YarnConfiguration.RM_ADDRESS, + server.getListenerAddress()); super.start(); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java index 7881dbb2c7b..06ad11ff1e3 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java @@ -133,11 +133,8 @@ public class ResourceTrackerService extends AbstractService implements } this.server.start(); - if (getConfig().getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) { - String resolvedAddress = - server.getListenerAddress().getHostName() + ":" + server.getListenerAddress().getPort(); - conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, resolvedAddress); - } + conf.updateConnectAddr(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, + server.getListenerAddress()); } @Override diff --git 
a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java index b2c75cff71c..cf5629fc50f 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java @@ -399,8 +399,7 @@ public class TestContainerManagerSecurity { Token appToken = new Token(appTokenIdentifier, appTokenSecretManager); - appToken.setService(new Text(schedulerAddr.getHostName() + ":" - + schedulerAddr.getPort())); + SecurityUtil.setTokenService(appToken, schedulerAddr); currentUser.addToken(appToken); SecurityUtil.setTokenService(appToken, schedulerAddr); From 9aae7c22acfcc7b337cf06149864319d2cc877f8 Mon Sep 17 00:00:00 2001 From: Uma Maheswara Rao G Date: Thu, 3 May 2012 19:05:38 +0000 Subject: [PATCH 05/70] HDFS-3332. NullPointerException in DN when directoryscanner is trying to report bad blocks. Contributed by Amith D K. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1333587 13f79535-47bb-0310-9956-ffa450edef68 --- .../org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java index f018f53e731..10049e8871c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java @@ -235,6 +235,9 @@ class BPServiceActor implements Runnable { } void reportBadBlocks(ExtendedBlock block) { + if (bpRegistration == null) { + return; + } DatanodeInfo[] dnArr = { new DatanodeInfo(bpRegistration) }; LocatedBlock[] blocks = { new LocatedBlock(block, dnArr) }; From 25882b199b586633cca5c092b9fd8b015cb59476 Mon Sep 17 00:00:00 2001 From: Todd Lipcon Date: Thu, 3 May 2012 21:13:43 +0000 Subject: [PATCH 06/70] HDFS-3359. DFSClient.close should close cached sockets. Contributed by Todd Lipcon. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1333624 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../java/org/apache/hadoop/hdfs/DFSClient.java | 2 ++ .../hadoop/hdfs/TestDistributedFileSystem.java | 15 ++++++++++++++- 3 files changed, 18 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 18b93789805..913684a9971 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -592,6 +592,8 @@ Release 2.0.0 - UNRELEASED HDFS-3351. NameNode#initializeGenericKeys should always set fs.defaultFS regardless of whether HA or Federation is enabled. (atm) + HDFS-3359. DFSClient.close should close cached sockets. (todd) + BREAKDOWN OF HDFS-1623 SUBTASKS HDFS-2179. Add fencing framework and mechanisms for NameNode HA. 
(todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 43b1ba6fb84..969b0581282 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -560,6 +560,7 @@ public class DFSClient implements java.io.Closeable { void abort() { clientRunning = false; closeAllFilesBeingWritten(true); + socketCache.clear(); closeConnectionToNamenode(); } @@ -597,6 +598,7 @@ public class DFSClient implements java.io.Closeable { public synchronized void close() throws IOException { if(clientRunning) { closeAllFilesBeingWritten(false); + socketCache.clear(); clientRunning = false; leaserenewer.closeClient(this); // close connections to the namenode diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java index 4055cd8d3d6..a46a56b92df 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java @@ -85,6 +85,7 @@ public class TestDistributedFileSystem { /** * Tests DFSClient.close throws no ConcurrentModificationException if * multiple files are open. + * Also tests that any cached sockets are closed. (HDFS-3359) */ @Test public void testDFSClose() throws Exception { @@ -94,11 +95,23 @@ public class TestDistributedFileSystem { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); FileSystem fileSys = cluster.getFileSystem(); - // create two files + // create two files, leaving them open fileSys.create(new Path("/test/dfsclose/file-0")); fileSys.create(new Path("/test/dfsclose/file-1")); + + // create another file, close it, and read it, so + // the client gets a socket in its SocketCache + Path p = new Path("/non-empty-file"); + DFSTestUtil.createFile(fileSys, p, 1L, (short)1, 0L); + DFSTestUtil.readFile(fileSys, p); + + DFSClient client = ((DistributedFileSystem)fileSys).dfs; + SocketCache cache = client.socketCache; + assertEquals(1, cache.size()); fileSys.close(); + + assertEquals(0, cache.size()); } finally { if (cluster != null) {cluster.shutdown();} } From 03181022ab238b2d4f59932eb8eadbe7cb52a669 Mon Sep 17 00:00:00 2001 From: Todd Lipcon Date: Thu, 3 May 2012 21:57:10 +0000 Subject: [PATCH 07/70] HADOOP-8350. Improve NetUtils.getInputStream to return a stream which has a tunable timeout. Contributed by Todd Lipcon. 
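As a rough usage sketch of the API introduced here (conf and remoteAddr are placeholder variables, the timeout values are arbitrary, and IOException handling is omitted):

    // getInputStream() now returns a SocketInputWrapper; its read timeout can be
    // retuned later via setTimeout(), whether or not the socket has a channel.
    Socket s = NetUtils.getDefaultSocketFactory(conf).createSocket();
    NetUtils.connect(s, remoteAddr, 10000);

    SocketInputWrapper in = NetUtils.getInputStream(s, 10000); // 10s read timeout
    int firstByte = in.read();  // throws SocketTimeoutException if nothing arrives in time

    in.setTimeout(60000);       // later reads may now block for up to 60s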
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1333649 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 + .../java/org/apache/hadoop/net/NetUtils.java | 55 ++++----- .../hadoop/net/SocketIOWithTimeout.java | 4 + .../apache/hadoop/net/SocketInputStream.java | 11 +- .../org/apache/hadoop/net/TestNetUtils.java | 87 ++++++++++++++ .../hadoop/net/TestSocketIOWithTimeout.java | 107 ++++++++++-------- .../hadoop/hdfs/RemoteBlockReader2.java | 11 +- 7 files changed, 187 insertions(+), 91 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index d0d134bb306..a64e4a9cd2a 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -287,6 +287,9 @@ Release 2.0.0 - UNRELEASED HADOOP-8347. Hadoop Common logs misspell 'successful'. (Philip Zeyliger via eli) + HADOOP-8350. Improve NetUtils.getInputStream to return a stream which has + a tunable timeout. (todd) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java index d6bf5d92c3d..0fe61ad21c5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java @@ -375,53 +375,44 @@ public class NetUtils { } /** - * Same as getInputStream(socket, socket.getSoTimeout()).

+ * Same as getInputStream(socket, socket.getSoTimeout()). + *

* - * From documentation for {@link #getInputStream(Socket, long)}:
- * Returns InputStream for the socket. If the socket has an associated - * SocketChannel then it returns a - * {@link SocketInputStream} with the given timeout. If the socket does not - * have a channel, {@link Socket#getInputStream()} is returned. In the later - * case, the timeout argument is ignored and the timeout set with - * {@link Socket#setSoTimeout(int)} applies for reads.

- * - * Any socket created using socket factories returned by {@link NetUtils}, - * must use this interface instead of {@link Socket#getInputStream()}. - * * @see #getInputStream(Socket, long) - * - * @param socket - * @return InputStream for reading from the socket. - * @throws IOException */ - public static InputStream getInputStream(Socket socket) + public static SocketInputWrapper getInputStream(Socket socket) throws IOException { return getInputStream(socket, socket.getSoTimeout()); } - + /** - * Returns InputStream for the socket. If the socket has an associated - * SocketChannel then it returns a - * {@link SocketInputStream} with the given timeout. If the socket does not - * have a channel, {@link Socket#getInputStream()} is returned. In the later - * case, the timeout argument is ignored and the timeout set with - * {@link Socket#setSoTimeout(int)} applies for reads.

+ * Return a {@link SocketInputWrapper} for the socket and set the given + * timeout. If the socket does not have an associated channel, then its socket + * timeout will be set to the specified value. Otherwise, a + * {@link SocketInputStream} will be created which reads with the configured + * timeout. * - * Any socket created using socket factories returned by {@link NetUtils}, + * Any socket created using socket factories returned by {@link #NetUtils}, * must use this interface instead of {@link Socket#getInputStream()}. - * + * + * In general, this should be called only once on each socket: see the note + * in {@link SocketInputWrapper#setTimeout(long)} for more information. + * * @see Socket#getChannel() * * @param socket - * @param timeout timeout in milliseconds. This may not always apply. zero - * for waiting as long as necessary. - * @return InputStream for reading from the socket. + * @param timeout timeout in milliseconds. zero for waiting as + * long as necessary. + * @return SocketInputWrapper for reading from the socket. * @throws IOException */ - public static InputStream getInputStream(Socket socket, long timeout) + public static SocketInputWrapper getInputStream(Socket socket, long timeout) throws IOException { - return (socket.getChannel() == null) ? - socket.getInputStream() : new SocketInputStream(socket, timeout); + InputStream stm = (socket.getChannel() == null) ? + socket.getInputStream() : new SocketInputStream(socket); + SocketInputWrapper w = new SocketInputWrapper(socket, stm); + w.setTimeout(timeout); + return w; } /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java index e51602ff058..18874ecf91b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java @@ -247,6 +247,10 @@ abstract class SocketIOWithTimeout { ops)); } } + + public void setTimeout(long timeoutMs) { + this.timeout = timeoutMs; + } private static String timeoutExceptionString(SelectableChannel channel, long timeout, int ops) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputStream.java index ef8c02b7dda..a0b0c3ed0f9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputStream.java @@ -28,9 +28,6 @@ import java.nio.channels.ReadableByteChannel; import java.nio.channels.SelectableChannel; import java.nio.channels.SelectionKey; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - /** * This implements an input stream that can have a timeout while reading. * This sets non-blocking flag on the socket channel. @@ -40,9 +37,7 @@ import org.apache.hadoop.classification.InterfaceStability; * IllegalBlockingModeException. * Please use {@link SocketOutputStream} for writing. 
*/ -@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) -@InterfaceStability.Unstable -public class SocketInputStream extends InputStream +class SocketInputStream extends InputStream implements ReadableByteChannel { private Reader reader; @@ -171,4 +166,8 @@ public class SocketInputStream extends InputStream public void waitForReadable() throws IOException { reader.waitForIO(SelectionKey.OP_READ); } + + public void setTimeout(long timeoutMs) { + reader.setTimeout(timeoutMs); + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java index f10323b8273..61ac35c5cc9 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java @@ -25,11 +25,14 @@ import java.net.ConnectException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.NetworkInterface; +import java.net.ServerSocket; import java.net.Socket; import java.net.SocketException; +import java.net.SocketTimeoutException; import java.net.URI; import java.net.UnknownHostException; import java.util.Enumeration; +import java.util.concurrent.TimeUnit; import junit.framework.AssertionFailedError; @@ -37,7 +40,11 @@ import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.NetUtilsTestResolver; +import org.apache.hadoop.test.MultithreadedTestUtil.TestContext; +import org.apache.hadoop.test.MultithreadedTestUtil.TestingThread; +import org.junit.Assume; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; @@ -50,6 +57,13 @@ public class TestNetUtils { private static final int LOCAL_PORT = 8080; private static final String LOCAL_PORT_NAME = Integer.toString(LOCAL_PORT); + /** + * Some slop around expected times when making sure timeouts behave + * as expected. We assume that they will be accurate to within + * this threshold. + */ + static final long TIME_FUDGE_MILLIS = 200; + /** * Test that we can't accidentally connect back to the connecting socket due * to a quirk in the TCP spec. @@ -81,6 +95,79 @@ public class TestNetUtils { } } + @Test + public void testSocketReadTimeoutWithChannel() throws Exception { + doSocketReadTimeoutTest(true); + } + + @Test + public void testSocketReadTimeoutWithoutChannel() throws Exception { + doSocketReadTimeoutTest(false); + } + + + private void doSocketReadTimeoutTest(boolean withChannel) + throws IOException { + // Binding a ServerSocket is enough to accept connections. + // Rely on the backlog to accept for us. + ServerSocket ss = new ServerSocket(0); + + Socket s; + if (withChannel) { + s = NetUtils.getDefaultSocketFactory(new Configuration()) + .createSocket(); + Assume.assumeNotNull(s.getChannel()); + } else { + s = new Socket(); + assertNull(s.getChannel()); + } + + SocketInputWrapper stm = null; + try { + NetUtils.connect(s, ss.getLocalSocketAddress(), 1000); + + stm = NetUtils.getInputStream(s, 1000); + assertReadTimeout(stm, 1000); + + // Change timeout, make sure it applies. + stm.setTimeout(1); + assertReadTimeout(stm, 1); + + // If there is a channel, then setting the socket timeout + // should not matter. If there is not a channel, it will + // take effect. 
+ s.setSoTimeout(1000); + if (withChannel) { + assertReadTimeout(stm, 1); + } else { + assertReadTimeout(stm, 1000); + } + } finally { + IOUtils.closeStream(stm); + IOUtils.closeSocket(s); + ss.close(); + } + } + + private void assertReadTimeout(SocketInputWrapper stm, int timeoutMillis) + throws IOException { + long st = System.nanoTime(); + try { + stm.read(); + fail("Didn't time out"); + } catch (SocketTimeoutException ste) { + assertTimeSince(st, timeoutMillis); + } + } + + private void assertTimeSince(long startNanos, int expectedMillis) { + long durationNano = System.nanoTime() - startNanos; + long millis = TimeUnit.MILLISECONDS.convert( + durationNano, TimeUnit.NANOSECONDS); + assertTrue("Expected " + expectedMillis + "ms, but took " + millis, + Math.abs(millis - expectedMillis) < TIME_FUDGE_MILLIS); + } + /** * Test for { * @throws UnknownHostException @link NetUtils#getLocalInetAddress(String) diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java index 0c887eb82b8..5e3116e89ed 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java @@ -19,6 +19,7 @@ package org.apache.hadoop.net; import java.io.IOException; import java.io.InputStream; +import java.io.InterruptedIOException; import java.io.OutputStream; import java.net.SocketTimeoutException; import java.nio.channels.Pipe; @@ -26,8 +27,13 @@ import java.util.Arrays; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.MultithreadedTestUtil; +import org.apache.hadoop.test.MultithreadedTestUtil.TestContext; +import org.apache.hadoop.test.MultithreadedTestUtil.TestingThread; -import junit.framework.TestCase; +import org.junit.Test; +import static org.junit.Assert.*; /** * This tests timout out from SocketInputStream and @@ -36,14 +42,17 @@ import junit.framework.TestCase; * Normal read and write using these streams are tested by pretty much * every DFS unit test. */ -public class TestSocketIOWithTimeout extends TestCase { +public class TestSocketIOWithTimeout { static Log LOG = LogFactory.getLog(TestSocketIOWithTimeout.class); private static int TIMEOUT = 1*1000; private static String TEST_STRING = "1234567890"; + + private MultithreadedTestUtil.TestContext ctx = new TestContext(); - private void doIO(InputStream in, OutputStream out) throws IOException { + private void doIO(InputStream in, OutputStream out, + int expectedTimeout) throws IOException { /* Keep on writing or reading until we get SocketTimeoutException. * It expects this exception to occur within 100 millis of TIMEOUT. */ @@ -61,34 +70,15 @@ public class TestSocketIOWithTimeout extends TestCase { long diff = System.currentTimeMillis() - start; LOG.info("Got SocketTimeoutException as expected after " + diff + " millis : " + e.getMessage()); - assertTrue(Math.abs(TIMEOUT - diff) <= 200); + assertTrue(Math.abs(expectedTimeout - diff) <= + TestNetUtils.TIME_FUDGE_MILLIS); break; } } } - /** - * Just reads one byte from the input stream. 
- */ - static class ReadRunnable implements Runnable { - private InputStream in; - - public ReadRunnable(InputStream in) { - this.in = in; - } - public void run() { - try { - in.read(); - } catch (IOException e) { - LOG.info("Got expection while reading as expected : " + - e.getMessage()); - return; - } - assertTrue(false); - } - } - - public void testSocketIOWithTimeout() throws IOException { + @Test + public void testSocketIOWithTimeout() throws Exception { // first open pipe: Pipe pipe = Pipe.open(); @@ -96,7 +86,7 @@ public class TestSocketIOWithTimeout extends TestCase { Pipe.SinkChannel sink = pipe.sink(); try { - InputStream in = new SocketInputStream(source, TIMEOUT); + final InputStream in = new SocketInputStream(source, TIMEOUT); OutputStream out = new SocketOutputStream(sink, TIMEOUT); byte[] writeBytes = TEST_STRING.getBytes(); @@ -105,37 +95,62 @@ public class TestSocketIOWithTimeout extends TestCase { out.write(writeBytes); out.write(byteWithHighBit); - doIO(null, out); + doIO(null, out, TIMEOUT); in.read(readBytes); assertTrue(Arrays.equals(writeBytes, readBytes)); assertEquals(byteWithHighBit & 0xff, in.read()); - doIO(in, null); + doIO(in, null, TIMEOUT); + + // Change timeout on the read side. + ((SocketInputStream)in).setTimeout(TIMEOUT * 2); + doIO(in, null, TIMEOUT * 2); + /* * Verify that it handles interrupted threads properly. - * Use a large timeout and expect the thread to return quickly. + * Use a large timeout and expect the thread to return quickly + * upon interruption. */ - in = new SocketInputStream(source, 0); - Thread thread = new Thread(new ReadRunnable(in)); - thread.start(); - - try { - Thread.sleep(1000); - } catch (InterruptedException ignored) {} - + ((SocketInputStream)in).setTimeout(0); + TestingThread thread = new TestingThread(ctx) { + @Override + public void doWork() throws Exception { + try { + in.read(); + fail("Did not fail with interrupt"); + } catch (InterruptedIOException ste) { + LOG.info("Got expection while reading as expected : " + + ste.getMessage()); + } + } + }; + ctx.addThread(thread); + ctx.startThreads(); + // If the thread is interrupted before it calls read() + // then it throws ClosedByInterruptException due to + // some Java quirk. Waiting for it to call read() + // gets it into select(), so we get the expected + // InterruptedIOException. 
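An aside on the quirk referenced in that comment (not part of the patch): if a thread enters a blocking operation on an interruptible NIO channel while its interrupt flag is already set, the channel is closed and ClosedByInterruptException is raised instead of the InterruptedIOException the test wants. A self-contained reproduction with a plain Pipe:

    import java.nio.ByteBuffer;
    import java.nio.channels.ClosedByInterruptException;
    import java.nio.channels.Pipe;

    public class InterruptBeforeReadSketch {
      public static void main(String[] args) throws Exception {
        Pipe pipe = Pipe.open();
        // Set the interrupt flag before the blocking read is entered.
        Thread.currentThread().interrupt();
        try {
          pipe.source().read(ByteBuffer.allocate(1));
        } catch (ClosedByInterruptException expected) {
          // The interruptible channel closes itself immediately rather than
          // parking in select(), so no InterruptedIOException is ever thrown.
          System.out.println("closed by interrupt, as the comment warns");
        }
      }
    }

Sleeping until the reader is parked in select() before calling interrupt(), as the test does below, avoids this path.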
+ Thread.sleep(1000); thread.interrupt(); - - try { - thread.join(); - } catch (InterruptedException e) { - throw new IOException("Unexpected InterruptedException : " + e); - } - + ctx.stop(); + //make sure the channels are still open assertTrue(source.isOpen()); assertTrue(sink.isOpen()); - + + // Nevertheless, the output stream is closed, because + // a partial write may have succeeded (see comment in + // SocketOutputStream#write(byte[]), int, int) + try { + out.write(1); + fail("Did not throw"); + } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains( + "stream is closed", ioe); + } + out.close(); assertFalse(sink.isOpen()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java index 0713de8ca8f..fe4dc55c8d3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java @@ -46,7 +46,7 @@ import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.util.DirectBufferPool; import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.net.SocketInputStream; +import org.apache.hadoop.net.SocketInputWrapper; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.DataChecksum; @@ -450,11 +450,8 @@ public class RemoteBlockReader2 implements BlockReader { // // Get bytes in block, set streams // - Preconditions.checkArgument(sock.getChannel() != null, - "Socket %s does not have an associated Channel.", - sock); - SocketInputStream sin = - (SocketInputStream)NetUtils.getInputStream(sock); + SocketInputWrapper sin = NetUtils.getInputStream(sock); + ReadableByteChannel ch = sin.getReadableByteChannel(); DataInputStream in = new DataInputStream(sin); BlockOpResponseProto status = BlockOpResponseProto.parseFrom( @@ -477,7 +474,7 @@ public class RemoteBlockReader2 implements BlockReader { } return new RemoteBlockReader2(file, block.getBlockPoolId(), block.getBlockId(), - sin, checksum, verifyChecksum, startOffset, firstChunkOffset, len, sock); + ch, checksum, verifyChecksum, startOffset, firstChunkOffset, len, sock); } static void checkSuccess( From 6b58fa6494eb9f7cca91ccd86b4b518b59b4aa5e Mon Sep 17 00:00:00 2001 From: Todd Lipcon Date: Thu, 3 May 2012 21:58:08 +0000 Subject: [PATCH 08/70] Amend previous commit of HADOOP-8350 (missed new SocketInputWrapper file) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1333650 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/hadoop/net/SocketInputWrapper.java | 88 +++++++++++++++++++ 1 file changed, 88 insertions(+) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputWrapper.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputWrapper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputWrapper.java new file mode 100644 index 00000000000..f5cbe17519d --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputWrapper.java @@ -0,0 +1,88 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.net; + +import java.io.FilterInputStream; + +import java.io.InputStream; +import java.net.Socket; +import java.net.SocketException; +import java.nio.channels.ReadableByteChannel; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import com.google.common.base.Preconditions; + +/** + * A wrapper stream around a socket which allows setting of its timeout. If the + * socket has a channel, this uses non-blocking IO via the package-private + * {@link SocketInputStream} implementation. Otherwise, timeouts are managed by + * setting the underlying socket timeout itself. + */ +@InterfaceAudience.LimitedPrivate("HDFS") +@InterfaceStability.Unstable +public class SocketInputWrapper extends FilterInputStream { + private final Socket socket; + private final boolean hasChannel; + + SocketInputWrapper(Socket s, InputStream is) { + super(is); + this.socket = s; + this.hasChannel = s.getChannel() != null; + if (hasChannel) { + Preconditions.checkArgument(is instanceof SocketInputStream, + "Expected a SocketInputStream when there is a channel. " + + "Got: %s", is); + } + } + + /** + * Set the timeout for reads from this stream. + * + * Note: the behavior here can differ subtly depending on whether the + * underlying socket has an associated Channel. In particular, if there is no + * channel, then this call will affect the socket timeout for all + * readers of this socket. If there is a channel, then this call will affect + * the timeout only for this stream. As such, it is recommended to + * only create one {@link SocketInputWrapper} instance per socket. + * + * @param timeoutMs + * the new timeout, 0 for no timeout + * @throws SocketException + * if the timeout cannot be set + */ + public void setTimeout(long timeoutMs) throws SocketException { + if (hasChannel) { + ((SocketInputStream)in).setTimeout(timeoutMs); + } else { + socket.setSoTimeout((int)timeoutMs); + } + } + + /** + * @return an underlying ReadableByteChannel implementation. + * @throws IllegalStateException if this socket does not have a channel + */ + public ReadableByteChannel getReadableByteChannel() { + Preconditions.checkState(hasChannel, + "Socket %s does not have a channel", + this.socket); + return (SocketInputStream)in; + } +} \ No newline at end of file From dd8b7ae9d884ea22a68800fb0f8a1a991ac80834 Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Thu, 3 May 2012 23:33:44 +0000 Subject: [PATCH 09/70] HDFS-3350. In INode, add final to compareTo(..), equals(..) and hashCode(), and remove synchronized from updatePermissionStatus(..). 
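For context on the comparator used in the INode#compareTo(byte[]) hunk below (an illustration, not part of the commit message): Guava's SignedBytes.lexicographicalComparator() produces the same signed, lexicographic ordering that the removed hand-written compareBytes() loop implemented.

    import java.util.Comparator;

    import com.google.common.primitives.SignedBytes;

    public class SignedByteOrderSketch {
      public static void main(String[] args) {
        Comparator<byte[]> cmp = SignedBytes.lexicographicalComparator();
        // (byte) 0x80 is -128, so under signed comparison it sorts before 0x01.
        System.out.println(cmp.compare(new byte[] { (byte) 0x80 }, new byte[] { 0x01 }));  // negative
        // A proper prefix sorts before the longer array.
        System.out.println(cmp.compare(new byte[] { 1, 2 }, new byte[] { 1, 2, 3 }));      // negative
        // Identical contents compare as equal.
        System.out.println(cmp.compare(new byte[] { 1, 2 }, new byte[] { 1, 2 }));         // zero
      }
    }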
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1333679 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../hadoop/hdfs/server/namenode/INode.java | 55 +++++++------------ .../hdfs/server/namenode/INodeDirectory.java | 9 ++- 3 files changed, 26 insertions(+), 41 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 913684a9971..7d53324acc8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -594,6 +594,9 @@ Release 2.0.0 - UNRELEASED HDFS-3359. DFSClient.close should close cached sockets. (todd) + HDFS-3350. In INode, add final to compareTo(..), equals(..) and hashCode(), + and remove synchronized from updatePermissionStatus(..). (szetszwo) + BREAKDOWN OF HDFS-1623 SUBTASKS HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java index cdad315f7a4..2bc049dbfa7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java @@ -30,6 +30,8 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.util.StringUtils; +import com.google.common.primitives.SignedBytes; + /** * We keep an in-memory representation of the file/block hierarchy. * This is a base INode class containing common fields for file and @@ -143,8 +145,7 @@ abstract class INode implements Comparable, FSInodeInfo { protected PermissionStatus getPermissionStatus() { return new PermissionStatus(getUserName(),getGroupName(),getFsPermission()); } - private synchronized void updatePermissionStatus( - PermissionStatusFormat f, long n) { + private void updatePermissionStatus(PermissionStatusFormat f, long n) { permission = f.combine(n, permission); } /** Get user name */ @@ -400,48 +401,30 @@ abstract class INode implements Comparable, FSInodeInfo { } } - // - // Comparable interface - // - public int compareTo(byte[] o) { - return compareBytes(name, o); + private static final byte[] EMPTY_BYTES = {}; + + @Override + public final int compareTo(byte[] bytes) { + final byte[] left = name == null? EMPTY_BYTES: name; + final byte[] right = bytes == null? EMPTY_BYTES: bytes; + return SignedBytes.lexicographicalComparator().compare(left, right); } - public boolean equals(Object o) { - if (!(o instanceof INode)) { + @Override + public final boolean equals(Object that) { + if (this == that) { + return true; + } + if (that == null || !(that instanceof INode)) { return false; } - return Arrays.equals(this.name, ((INode)o).name); + return Arrays.equals(this.name, ((INode)that).name); } - public int hashCode() { + @Override + public final int hashCode() { return Arrays.hashCode(this.name); } - - // - // static methods - // - /** - * Compare two byte arrays. - * - * @return a negative integer, zero, or a positive integer - * as defined by {@link #compareTo(byte[])}. - */ - static int compareBytes(byte[] a1, byte[] a2) { - if (a1==a2) - return 0; - int len1 = (a1==null ? 0 : a1.length); - int len2 = (a2==null ? 
0 : a2.length); - int n = Math.min(len1, len2); - byte b1, b2; - for (int i=0; i Date: Fri, 4 May 2012 03:10:26 +0000 Subject: [PATCH 10/70] HADOOP-8356. FileSystem service loading mechanism should print the FileSystem impl it is failing to load (tucu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1333744 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../src/main/java/org/apache/hadoop/fs/FileSystem.java | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index a64e4a9cd2a..3885f35da08 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -290,6 +290,9 @@ Release 2.0.0 - UNRELEASED HADOOP-8350. Improve NetUtils.getInputStream to return a stream which has a tunable timeout. (todd) + HADOOP-8356. FileSystem service loading mechanism should print the FileSystem + impl it is failing to load (tucu) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java index 9229b84e8ff..b8879a29d59 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java @@ -199,7 +199,7 @@ public abstract class FileSystem extends Configured implements Closeable { * @return the protocol scheme for the FileSystem. */ public String getScheme() { - throw new UnsupportedOperationException("Not implemented by the FileSystem implementation"); + throw new UnsupportedOperationException("Not implemented by the " + getClass().getSimpleName() + " FileSystem implementation"); } /** Returns a URI whose scheme and authority identify this FileSystem.*/ From a7993ef5e8d34c16b1dbaaa562048456770dbf4b Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Fri, 4 May 2012 03:13:37 +0000 Subject: [PATCH 11/70] HADOOP-8355. 
SPNEGO filter throws/logs exception when authentication fails (tucu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1333746 13f79535-47bb-0310-9956-ffa450edef68 --- .../authentication/server/AuthenticationFilter.java | 13 ++++++++----- hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++ 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java index b37f39a50c6..28a4d3de90a 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java @@ -327,6 +327,8 @@ public class AuthenticationFilter implements Filter { @Override public void doFilter(ServletRequest request, ServletResponse response, FilterChain filterChain) throws IOException, ServletException { + boolean unauthorizedResponse = true; + String unauthorizedMsg = ""; HttpServletRequest httpRequest = (HttpServletRequest) request; HttpServletResponse httpResponse = (HttpServletResponse) response; try { @@ -350,6 +352,7 @@ public class AuthenticationFilter implements Filter { newToken = true; } if (token != null) { + unauthorizedResponse = false; if (LOG.isDebugEnabled()) { LOG.debug("Request [{}] user [{}] authenticated", getRequestURL(httpRequest), token.getUserName()); } @@ -378,17 +381,17 @@ public class AuthenticationFilter implements Filter { } filterChain.doFilter(httpRequest, httpResponse); } - else { - throw new AuthenticationException("Missing AuthenticationToken"); - } } catch (AuthenticationException ex) { + unauthorizedMsg = ex.toString(); + LOG.warn("Authentication exception: " + ex.getMessage(), ex); + } + if (unauthorizedResponse) { if (!httpResponse.isCommitted()) { Cookie cookie = createCookie(""); cookie.setMaxAge(0); httpResponse.addCookie(cookie); - httpResponse.sendError(HttpServletResponse.SC_UNAUTHORIZED, ex.getMessage()); + httpResponse.sendError(HttpServletResponse.SC_UNAUTHORIZED, unauthorizedMsg); } - LOG.warn("Authentication exception: " + ex.getMessage(), ex); } } diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 3885f35da08..7cff92b1fcc 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -418,6 +418,8 @@ Release 2.0.0 - UNRELEASED HADOOP-8346. Makes oid changes to make SPNEGO work. Was broken due to fixes introduced by the IBM JDK compatibility patch. (ddas) + HADOOP-8355. SPNEGO filter throws/logs exception when authentication fails (tucu) + BREAKDOWN OF HADOOP-7454 SUBTASKS HADOOP-7455. HA: Introduce HA Service Protocol Interface. (suresh) From 0fca4fbaf35ad6b48c57125a9389aaf45a2c88a5 Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Fri, 4 May 2012 03:20:16 +0000 Subject: [PATCH 12/70] MAPREDUCE-4205. 
retrofit all JVM shutdown hooks to use ShutdownHookManager (tucu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1333748 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 +++ .../hadoop/mapreduce/v2/app/MRAppMaster.java | 18 +++++++++--------- .../mapreduce/v2/hs/JobHistoryServer.java | 12 ++++++++++-- .../hadoop/yarn/service/CompositeService.java | 3 +-- .../yarn/server/nodemanager/NodeManager.java | 12 ++++++++++-- .../resourcemanager/ResourceManager.java | 12 ++++++++++-- .../server/webproxy/WebAppProxyServer.java | 12 ++++++++++-- .../java/org/apache/hadoop/tools/DistCp.java | 12 ++++++++++-- 8 files changed, 63 insertions(+), 21 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 2cd5a224c62..f852d993819 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -163,6 +163,9 @@ Release 2.0.0 - UNRELEASED MAPREDUCE-4219. make default container-executor.conf.dir be a path relative to the container-executor binary. (rvs via tucu) + MAPREDUCE-4205. retrofit all JVM shutdown hooks to use ShutdownHookManager + (tucu) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java index f76ae5a9db3..2d6f3121485 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java @@ -90,6 +90,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.yarn.Clock; import org.apache.hadoop.yarn.ClusterInfo; import org.apache.hadoop.yarn.SystemClock; @@ -130,6 +131,11 @@ public class MRAppMaster extends CompositeService { private static final Log LOG = LogFactory.getLog(MRAppMaster.class); + /** + * Priority of the MRAppMaster shutdown hook. + */ + public static final int SHUTDOWN_HOOK_PRIORITY = 30; + private Clock clock; private final long startTime; private final long appSubmitTime; @@ -990,8 +996,8 @@ public class MRAppMaster extends CompositeService { new MRAppMaster(applicationAttemptId, containerId, nodeHostString, Integer.parseInt(nodePortString), Integer.parseInt(nodeHttpPortString), appSubmitTime); - Runtime.getRuntime().addShutdownHook( - new MRAppMasterShutdownHook(appMaster)); + ShutdownHookManager.get().addShutdownHook( + new MRAppMasterShutdownHook(appMaster), SHUTDOWN_HOOK_PRIORITY); YarnConfiguration conf = new YarnConfiguration(new JobConf()); conf.addResource(new Path(MRJobConfig.JOB_CONF_FILE)); String jobUserName = System @@ -1010,7 +1016,7 @@ public class MRAppMaster extends CompositeService { // The shutdown hook that runs when a signal is received AND during normal // close of the JVM. 
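As background for the hunk that follows (illustration only, not part of the patch): ShutdownHookManager accepts plain Runnables and runs every registered hook on JVM exit, highest priority first, which is presumably why the explicit FileSystem.closeAll() call removed below is no longer needed; the FileSystem cache registers its own, lower-priority hook with the same manager. A minimal sketch with made-up priorities:

    import org.apache.hadoop.util.ShutdownHookManager;

    public class ShutdownHookSketch {
      // Made-up priorities for the sketch; a higher number runs earlier.
      private static final int STOP_SERVICE_PRIORITY = 30;
      private static final int FLUSH_LOGS_PRIORITY = 10;

      public static void main(String[] args) {
        ShutdownHookManager mgr = ShutdownHookManager.get();
        mgr.addShutdownHook(new Runnable() {
          public void run() {
            System.out.println("1) stop the service");
          }
        }, STOP_SERVICE_PRIORITY);
        mgr.addShutdownHook(new Runnable() {
          public void run() {
            System.out.println("2) flush logs and close caches");
          }
        }, FLUSH_LOGS_PRIORITY);
        // Nothing else to do: the manager owns a single JVM shutdown hook and
        // invokes both Runnables from it, in descending priority order.
      }
    }

The daemons touched by this patch all register at priority 30, leaving room for lower-priority cleanup hooks such as the FileSystem cache.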
- static class MRAppMasterShutdownHook extends Thread { + static class MRAppMasterShutdownHook implements Runnable { MRAppMaster appMaster; MRAppMasterShutdownHook(MRAppMaster appMaster) { this.appMaster = appMaster; @@ -1028,12 +1034,6 @@ public class MRAppMaster extends CompositeService { appMaster.jobHistoryEventHandler.setSignalled(true); } appMaster.stop(); - try { - //Close all the FileSystem objects - FileSystem.closeAll(); - } catch (IOException ioe) { - LOG.warn("Failed to close all FileSystem objects", ioe); - } } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java index 3c7bd8ccd81..00b3c70deb3 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java @@ -28,6 +28,7 @@ import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapreduce.MRConfig; import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig; import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -40,6 +41,12 @@ import org.apache.hadoop.yarn.service.CompositeService; * *****************************************************************/ public class JobHistoryServer extends CompositeService { + + /** + * Priority of the JobHistoryServer shutdown hook. + */ + public static final int SHUTDOWN_HOOK_PRIORITY = 30; + private static final Log LOG = LogFactory.getLog(JobHistoryServer.class); private HistoryContext historyContext; private HistoryClientService clientService; @@ -118,8 +125,9 @@ public class JobHistoryServer extends CompositeService { StringUtils.startupShutdownMessage(JobHistoryServer.class, args, LOG); try { JobHistoryServer jobHistoryServer = new JobHistoryServer(); - Runtime.getRuntime().addShutdownHook( - new CompositeServiceShutdownHook(jobHistoryServer)); + ShutdownHookManager.get().addShutdownHook( + new CompositeServiceShutdownHook(jobHistoryServer), + SHUTDOWN_HOOK_PRIORITY); YarnConfiguration conf = new YarnConfiguration(new JobConf()); jobHistoryServer.init(conf); jobHistoryServer.start(); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/service/CompositeService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/service/CompositeService.java index 00cebcfef46..cd4e52349e4 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/service/CompositeService.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/service/CompositeService.java @@ -107,12 +107,11 @@ public class CompositeService extends AbstractService { * JVM Shutdown hook for CompositeService which will stop the give * CompositeService gracefully in case of JVM shutdown. 
*/ - public static class CompositeServiceShutdownHook extends Thread { + public static class CompositeServiceShutdownHook implements Runnable { private CompositeService compositeService; public CompositeServiceShutdownHook(CompositeService compositeService) { - super("CompositeServiceShutdownHook for " + compositeService.getName()); this.compositeService = compositeService; } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java index 5e856e5c997..e7b6b91cf2e 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java @@ -30,6 +30,7 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -54,6 +55,12 @@ import org.apache.hadoop.yarn.util.Records; public class NodeManager extends CompositeService implements ServiceStateChangeListener { + + /** + * Priority of the NodeManager shutdown hook. + */ + public static final int SHUTDOWN_HOOK_PRIORITY = 30; + private static final Log LOG = LogFactory.getLog(NodeManager.class); protected final NodeManagerMetrics metrics = NodeManagerMetrics.create(); protected ContainerTokenSecretManager containerTokenSecretManager; @@ -250,11 +257,12 @@ public class NodeManager extends CompositeService implements // Remove the old hook if we are rebooting. 
if (hasToReboot && null != nodeManagerShutdownHook) { - Runtime.getRuntime().removeShutdownHook(nodeManagerShutdownHook); + ShutdownHookManager.get().removeShutdownHook(nodeManagerShutdownHook); } nodeManagerShutdownHook = new CompositeServiceShutdownHook(this); - Runtime.getRuntime().addShutdownHook(nodeManagerShutdownHook); + ShutdownHookManager.get().addShutdownHook(nodeManagerShutdownHook, + SHUTDOWN_HOOK_PRIORITY); YarnConfiguration conf = new YarnConfiguration(); this.init(conf); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index b2672265331..abf3ff9dfe9 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -32,6 +32,7 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.source.JvmMetrics; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -87,6 +88,12 @@ import org.apache.hadoop.yarn.webapp.WebApps.Builder; */ @SuppressWarnings("unchecked") public class ResourceManager extends CompositeService implements Recoverable { + + /** + * Priority of the ResourceManager shutdown hook. 
+ */ + public static final int SHUTDOWN_HOOK_PRIORITY = 30; + private static final Log LOG = LogFactory.getLog(ResourceManager.class); public static final long clusterTimeStamp = System.currentTimeMillis(); @@ -613,8 +620,9 @@ public class ResourceManager extends CompositeService implements Recoverable { Configuration conf = new YarnConfiguration(); Store store = StoreFactory.getStore(conf); ResourceManager resourceManager = new ResourceManager(store); - Runtime.getRuntime().addShutdownHook( - new CompositeServiceShutdownHook(resourceManager)); + ShutdownHookManager.get().addShutdownHook( + new CompositeServiceShutdownHook(resourceManager), + SHUTDOWN_HOOK_PRIORITY); resourceManager.init(conf); //resourceManager.recover(store.restore()); //store.doneWithRecovery(); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServer.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServer.java index 469b25918d6..64502237317 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServer.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServer.java @@ -24,6 +24,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -34,6 +35,12 @@ import org.apache.hadoop.yarn.service.CompositeService; * web interfaces. */ public class WebAppProxyServer extends CompositeService { + + /** + * Priority of the ResourceManager shutdown hook. + */ + public static final int SHUTDOWN_HOOK_PRIORITY = 30; + private static final Log LOG = LogFactory.getLog(WebAppProxyServer.class); private WebAppProxy proxy = null; @@ -69,8 +76,9 @@ public class WebAppProxyServer extends CompositeService { StringUtils.startupShutdownMessage(WebAppProxyServer.class, args, LOG); try { WebAppProxyServer proxy = new WebAppProxyServer(); - Runtime.getRuntime().addShutdownHook( - new CompositeServiceShutdownHook(proxy)); + ShutdownHookManager.get().addShutdownHook( + new CompositeServiceShutdownHook(proxy), + SHUTDOWN_HOOK_PRIORITY); YarnConfiguration conf = new YarnConfiguration(); proxy.init(conf); proxy.start(); diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java index 6f1df5f4fe7..05a581f43fc 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java @@ -33,6 +33,7 @@ import org.apache.hadoop.tools.CopyListing.*; import org.apache.hadoop.tools.mapred.CopyMapper; import org.apache.hadoop.tools.mapred.CopyOutputFormat; import org.apache.hadoop.tools.util.DistCpUtils; +import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; @@ -49,6 +50,12 @@ import java.util.Random; * behaviour. 
*/ public class DistCp extends Configured implements Tool { + + /** + * Priority of the ResourceManager shutdown hook. + */ + public static final int SHUTDOWN_HOOK_PRIORITY = 30; + private static final Log LOG = LogFactory.getLog(DistCp.class); private DistCpOptions inputOptions; @@ -353,7 +360,8 @@ public class DistCp extends Configured implements Tool { DistCp distCp = new DistCp(); Cleanup CLEANUP = new Cleanup(distCp); - Runtime.getRuntime().addShutdownHook(CLEANUP); + ShutdownHookManager.get().addShutdownHook(CLEANUP, + SHUTDOWN_HOOK_PRIORITY); System.exit(ToolRunner.run(getDefaultConf(), distCp, argv)); } catch (Exception e) { @@ -388,7 +396,7 @@ public class DistCp extends Configured implements Tool { return submitted; } - private static class Cleanup extends Thread { + private static class Cleanup implements Runnable { private final DistCp distCp; public Cleanup(DistCp distCp) { From a70587f368a6519fceb0388c14befec4e97e8293 Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Fri, 4 May 2012 03:31:44 +0000 Subject: [PATCH 13/70] HADOOP-8343. Allow configuration of authorization for JmxJsonServlet and MetricsServlet (tucu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1333750 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 ++ .../org/apache/hadoop/conf/ConfServlet.java | 6 +-- .../fs/CommonConfigurationKeysPublic.java | 3 ++ .../org/apache/hadoop/http/HttpServer.java | 39 ++++++++++++++++--- .../org/apache/hadoop/jmx/JMXJsonServlet.java | 5 +-- .../apache/hadoop/metrics/MetricsServlet.java | 5 +-- .../src/main/resources/core-default.xml | 9 +++++ .../hadoop/http/HttpServerFunctionalTest.java | 12 ++++++ .../apache/hadoop/http/TestHttpServer.java | 23 ++++++++++- 9 files changed, 89 insertions(+), 16 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 7cff92b1fcc..8bb39d508b8 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -163,6 +163,9 @@ Release 2.0.0 - UNRELEASED HADOOP-8210. Common side of HDFS-3148: The client should be able to use multiple local interfaces for data transfer. (eli) + HADOOP-8343. Allow configuration of authorization for JmxJsonServlet and + MetricsServlet (tucu) + IMPROVEMENTS HADOOP-7524. 
Change RPC to allow multiple protocols including multuple diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java index 5ca8537135f..da39fa57b74 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java @@ -18,7 +18,6 @@ package org.apache.hadoop.conf; import java.io.IOException; -import java.io.OutputStreamWriter; import java.io.Writer; import javax.servlet.ServletException; @@ -57,9 +56,8 @@ public class ConfServlet extends HttpServlet { public void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { - // Do the authorization - if (!HttpServer.hasAdministratorAccess(getServletContext(), request, - response)) { + if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), + request, response)) { return; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java index 9bc5c374593..67f3bc594c9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java @@ -228,6 +228,9 @@ public class CommonConfigurationKeysPublic { public static final String HADOOP_SECURITY_AUTHORIZATION = "hadoop.security.authorization"; /** See core-default.xml */ + public static final String HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN = + "hadoop.security.instrumentation.requires.admin"; + /** See core-default.xml */ public static final String HADOOP_SECURITY_SERVICE_USER_NAME_KEY = "hadoop.security.service.user.name.key"; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java index 6a2c9fa360c..ab3e5999e5b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java @@ -779,6 +779,37 @@ public class HttpServer implements FilterContainer { : "Inactive HttpServer"; } + /** + * Checks the user has privileges to access to instrumentation servlets. + *
+ * If hadoop.security.instrumentation.requires.admin is set to FALSE + * (default value) it always returns TRUE. + *
+ * If hadoop.security.instrumentation.requires.admin is set to TRUE + * it will check that if the current user is in the admin ACLS. If the user is + * in the admin ACLs it returns TRUE, otherwise it returns FALSE. + * + * @param servletContext the servlet context. + * @param request the servlet request. + * @param response the servlet response. + * @return TRUE/FALSE based on the logic decribed above. + */ + public static boolean isInstrumentationAccessAllowed( + ServletContext servletContext, HttpServletRequest request, + HttpServletResponse response) throws IOException { + Configuration conf = + (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE); + + boolean access = true; + boolean adminAccess = conf.getBoolean( + CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, + false); + if (adminAccess) { + access = hasAdministratorAccess(servletContext, request, response); + } + return access; + } + /** * Does the user sending the HttpServletRequest has the administrator ACLs? If * it isn't the case, response will be modified to send an error to the user. @@ -794,7 +825,6 @@ public class HttpServer implements FilterContainer { HttpServletResponse response) throws IOException { Configuration conf = (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE); - // If there is no authorization, anybody has administrator access. if (!conf.getBoolean( CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) { @@ -834,12 +864,11 @@ public class HttpServer implements FilterContainer { @Override public void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { - response.setContentType("text/plain; charset=UTF-8"); - // Do the authorization - if (!HttpServer.hasAdministratorAccess(getServletContext(), request, - response)) { + if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), + request, response)) { return; } + response.setContentType("text/plain; charset=UTF-8"); PrintWriter out = response.getWriter(); ReflectionUtils.printThreadInfo(out, ""); out.close(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java index cc46aacd22b..8dc83a3c716 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java @@ -148,9 +148,8 @@ public class JMXJsonServlet extends HttpServlet { @Override public void doGet(HttpServletRequest request, HttpServletResponse response) { try { - // Do the authorization - if (!HttpServer.hasAdministratorAccess(getServletContext(), request, - response)) { + if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), + request, response)) { return; } JsonGenerator jg = null; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsServlet.java index 92c342108d1..af469f9a34d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsServlet.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsServlet.java @@ -106,9 +106,8 @@ public class MetricsServlet extends HttpServlet { public void doGet(HttpServletRequest request, HttpServletResponse response) throws 
ServletException, IOException { - // Do the authorization - if (!HttpServer.hasAdministratorAccess(getServletContext(), request, - response)) { + if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), + request, response)) { return; } diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index 928e473d727..f94e49782eb 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -62,6 +62,15 @@ Is service-level authorization enabled? + + hadoop.security.instrumentation.requires.admin + false + + Indicates if administrator ACLs are required to access + instrumentation servlets (JMX, METRICS, CONF, STACKS). + + + hadoop.security.authentication simple diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java index b32d2a2d2cd..6dee7eb7134 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java @@ -19,6 +19,7 @@ package org.apache.hadoop.http; +import org.apache.hadoop.security.authorize.AccessControlList; import org.junit.Assert; import org.apache.hadoop.conf.Configuration; @@ -70,6 +71,12 @@ public class HttpServerFunctionalTest extends Assert { return createServer(TEST, conf); } + public static HttpServer createTestServer(Configuration conf, AccessControlList adminsAcl) + throws IOException { + prepareTestWebapp(); + return createServer(TEST, conf, adminsAcl); + } + /** * Create but do not start the test webapp server. The test webapp dir is * prepared/checked in advance. 
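A small illustration (not part of the patch) of turning the new gate on. It sets the two properties documented in the core-default.xml hunk above programmatically; in a real deployment they would normally be overridden in core-site.xml instead:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;

    public class InstrumentationLockdownSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Both switches must be true before the JMX, METRICS, CONF and STACKS
        // servlets fall back to the admin-ACL check instead of allowing everyone.
        conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
        conf.setBoolean(
            CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true);
        System.out.println(conf.getBoolean(
            CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, false));
      }
    }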
@@ -132,6 +139,11 @@ public class HttpServerFunctionalTest extends Assert { throws IOException { return new HttpServer(webapp, "0.0.0.0", 0, true, conf); } + + public static HttpServer createServer(String webapp, Configuration conf, AccessControlList adminsAcl) + throws IOException { + return new HttpServer(webapp, "0.0.0.0", 0, true, conf, adminsAcl); + } /** * Create an HttpServer instance for the given webapp * @param webapp the webapp to work with diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java index bd9c230c50c..a4d5c5a9c4e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java @@ -60,7 +60,6 @@ import org.apache.hadoop.security.authorize.AccessControlList; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; -import org.mockito.Mock; import org.mockito.Mockito; import org.mortbay.util.ajax.JSON; @@ -360,6 +359,8 @@ public class TestHttpServer extends HttpServerFunctionalTest { Configuration conf = new Configuration(); conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true); + conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, + true); conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY, DummyFilterInitializer.class.getName()); @@ -468,6 +469,26 @@ public class TestHttpServer extends HttpServerFunctionalTest { } + @Test + public void testRequiresAuthorizationAccess() throws Exception { + Configuration conf = new Configuration(); + ServletContext context = Mockito.mock(ServletContext.class); + Mockito.when(context.getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf); + HttpServletRequest request = Mockito.mock(HttpServletRequest.class); + HttpServletResponse response = Mockito.mock(HttpServletResponse.class); + + //requires admin access to instrumentation, FALSE by default + Assert.assertTrue(HttpServer.isInstrumentationAccessAllowed(context, request, response)); + + //requires admin access to instrumentation, TRUE + conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true); + conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true); + AccessControlList acls = Mockito.mock(AccessControlList.class); + Mockito.when(acls.isUserAllowed(Mockito.any())).thenReturn(false); + Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls); + Assert.assertFalse(HttpServer.isInstrumentationAccessAllowed(context, request, response)); + } + @Test public void testBindAddress() throws Exception { checkBindAddress("0.0.0.0", 0, false).stop(); // hang onto this one for a bit more testing From 48f95779c1b2b631168235303655e7920abc5ae6 Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Fri, 4 May 2012 15:02:54 +0000 Subject: [PATCH 14/70] MAPREDUCE-4048. 
NullPointerException exception while accessing the Application Master UI (Devaraj K via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1334013 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 + .../v2/app/webapp/AppController.java | 11 ++- .../v2/app/webapp/TestAppController.java | 71 +++++++++++++++++++ 3 files changed, 84 insertions(+), 1 deletion(-) create mode 100644 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index f852d993819..c4046c64b69 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -452,6 +452,9 @@ Release 0.23.3 - UNRELEASED MAPREDUCE-4163. consistently set the bind address (Daryn Sharp via bobby) + MAPREDUCE-4048. NullPointerException exception while accessing the + Application Master UI (Devaraj K via bobby) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java index f9583da5a97..da537e5bc71 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java @@ -27,6 +27,8 @@ import java.util.Locale; import javax.servlet.http.HttpServletResponse; import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.JobACL; import org.apache.hadoop.mapreduce.v2.api.records.JobId; @@ -47,6 +49,8 @@ import com.google.inject.Inject; * This class renders the various pages that the web app supports. */ public class AppController extends Controller implements AMParams { + private static final Log LOG = LogFactory.getLog(AppController.class); + protected final App app; protected AppController(App app, Configuration conf, RequestContext ctx, @@ -220,6 +224,8 @@ public class AppController extends Controller implements AMParams { toString().toLowerCase(Locale.US)); setTitle(join(tt, " Tasks for ", $(JOB_ID))); } catch (Exception e) { + LOG.error("Failed to render tasks page with task type : " + + $(TASK_TYPE) + " for job id : " + $(JOB_ID), e); badRequest(e.getMessage()); } } @@ -283,6 +289,8 @@ public class AppController extends Controller implements AMParams { render(attemptsPage()); } catch (Exception e) { + LOG.error("Failed to render attempts page with task type : " + + $(TASK_TYPE) + " for job id : " + $(JOB_ID), e); badRequest(e.getMessage()); } } @@ -316,7 +324,8 @@ public class AppController extends Controller implements AMParams { */ void badRequest(String s) { setStatus(HttpServletResponse.SC_BAD_REQUEST); - setTitle(join("Bad request: ", s)); + String title = "Bad request: "; + setTitle((s != null) ? 
join(title, s) : title); } /** diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java new file mode 100644 index 00000000000..4fcb4755736 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapreduce.v2.app.webapp; + +import static org.mockito.Matchers.anyString; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapreduce.v2.app.AppContext; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.util.Records; +import org.apache.hadoop.yarn.webapp.Controller.RequestContext; +import org.junit.Before; +import org.junit.Test; + +public class TestAppController { + + private AppController appController; + private RequestContext ctx; + + @Before + public void setUp() { + AppContext context = mock(AppContext.class); + when(context.getApplicationID()).thenReturn( + Records.newRecord(ApplicationId.class)); + App app = new App(context); + Configuration conf = new Configuration(); + ctx = mock(RequestContext.class); + appController = new AppController(app, conf, ctx); + } + + @Test + public void testBadRequest() { + String message = "test string"; + appController.badRequest(message); + verifyExpectations(message); + } + + @Test + public void testBadRequestWithNullMessage() { + // It should not throw NullPointerException + appController.badRequest(null); + verifyExpectations(StringUtils.EMPTY); + } + + private void verifyExpectations(String message) { + verify(ctx).setStatus(400); + verify(ctx).set("app.id", "application_0_0000"); + verify(ctx).set(eq("rm.web"), anyString()); + verify(ctx).set("title", "Bad request: " + message); + } +} From 407cfa3b56a0645d64d2d9af305f6ef24307e775 Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Fri, 4 May 2012 15:47:02 +0000 Subject: [PATCH 15/70] MAPREDUCE-3958. 
RM: Remove RMNodeState and replace it with NodeState (Bikas Saha via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1334043 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 + .../hadoop/yarn/api/records/NodeState.java | 2 - .../ApplicationMasterService.java | 3 +- .../resourcemanager/ClientRMService.java | 3 +- .../resourcemanager/rmapp/RMAppImpl.java | 4 +- .../server/resourcemanager/rmnode/RMNode.java | 4 +- .../resourcemanager/rmnode/RMNodeImpl.java | 75 ++++++++++--------- .../resourcemanager/rmnode/RMNodeState.java | 45 ----------- .../resourcemanager/webapp/NodesPage.java | 10 +-- .../resourcemanager/webapp/RMWebServices.java | 6 +- .../resourcemanager/webapp/dao/NodeInfo.java | 4 +- .../server/resourcemanager/MockNodes.java | 22 +++--- .../yarn/server/resourcemanager/MockRM.java | 4 +- .../TestAMRMRPCNodeUpdates.java | 3 +- .../resourcemanager/webapp/TestNodesPage.java | 4 +- .../resourcemanager/webapp/TestRMWebApp.java | 6 +- .../webapp/TestRMWebServicesNodes.java | 53 +++++++------ 17 files changed, 102 insertions(+), 149 deletions(-) delete mode 100644 hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeState.java diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index c4046c64b69..3c04711f3ed 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -281,6 +281,9 @@ Release 2.0.0 - UNRELEASED MAPREDUCE-3173. MRV2 UI doesn't work properly without internet (Devaraj K via bobby) + MAPREDUCE-3958. RM: Remove RMNodeState and replace it with NodeState + (Bikas Saha via bobby) + Release 0.23.3 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java index 6689b72f3bd..31916c6be8a 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java @@ -18,8 +18,6 @@ package org.apache.hadoop.yarn.api.records; -// TODO NodeState is a clone of RMNodeState made for MR-3353. In a subsequent -// patch RMNodeState should be replaced with NodeState /** *
State of a Node.
*/ diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java index 46994b054b6..849111e54b5 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java @@ -56,7 +56,6 @@ import org.apache.hadoop.yarn.ipc.RPCUtil; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; -import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; @@ -302,7 +301,7 @@ public class ApplicationMasterService extends AbstractService implements numContainers = schedulerNodeReport.getNumContainers(); } NodeReport report = BuilderUtils.newNodeReport(rmNode.getNodeID(), - RMNodeState.toNodeState(rmNode.getState()), + rmNode.getState(), rmNode.getHttpAddress(), rmNode.getRackName(), used, rmNode.getTotalCapability(), numContainers, rmNode.getNodeHealthStatus()); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java index 8ea93ef34f0..8a007d536c8 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java @@ -79,7 +79,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; -import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; import org.apache.hadoop.yarn.server.resourcemanager.security.authorize.RMPolicyProvider; @@ -414,7 +413,7 @@ public class ClientRMService extends AbstractService implements } NodeReport report = BuilderUtils.newNodeReport(rmNode.getNodeID(), - RMNodeState.toNodeState(rmNode.getState()), + rmNode.getState(), rmNode.getHttpAddress(), rmNode.getRackName(), used, rmNode.getTotalCapability(), numContainers, rmNode.getNodeHealthStatus()); diff --git 
a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java index 46e17b33b48..43fc99196eb 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java @@ -39,6 +39,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.Dispatcher; @@ -56,7 +57,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptE import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanAppEvent; -import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; import org.apache.hadoop.yarn.state.InvalidStateTransitonException; import org.apache.hadoop.yarn.state.MultipleArcTransition; @@ -498,7 +498,7 @@ public class RMAppImpl implements RMApp { } private void processNodeUpdate(RMAppNodeUpdateType type, RMNode node) { - RMNodeState nodeState = node.getState(); + NodeState nodeState = node.getState(); updatedNodes.add(node); LOG.debug("Received node update event:" + type + " for node:" + node + " with state:" + nodeState); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java index 8dda6eda1de..006e13e6105 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java @@ -23,10 +23,10 @@ import java.util.List; import org.apache.hadoop.net.Node; import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NodeHealthStatus; import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse; /** @@ -99,7 +99,7 @@ public interface RMNode { */ public Node getNode(); - public RMNodeState getState(); + public NodeState 
getState(); public List getContainersToCleanUp(); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java index 5357695cfaa..9a926e0ced6 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java @@ -40,6 +40,7 @@ import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.NodeHealthStatus; import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.factories.RecordFactory; @@ -104,53 +105,53 @@ public class RMNodeImpl implements RMNode, EventHandler { .newRecordInstance(HeartbeatResponse.class); private static final StateMachineFactory stateMachineFactory = new StateMachineFactory(RMNodeState.NEW) + RMNodeEvent>(NodeState.NEW) //Transitions from NEW state - .addTransition(RMNodeState.NEW, RMNodeState.RUNNING, + .addTransition(NodeState.NEW, NodeState.RUNNING, RMNodeEventType.STARTED, new AddNodeTransition()) //Transitions from RUNNING state - .addTransition(RMNodeState.RUNNING, - EnumSet.of(RMNodeState.RUNNING, RMNodeState.UNHEALTHY), + .addTransition(NodeState.RUNNING, + EnumSet.of(NodeState.RUNNING, NodeState.UNHEALTHY), RMNodeEventType.STATUS_UPDATE, new StatusUpdateWhenHealthyTransition()) - .addTransition(RMNodeState.RUNNING, RMNodeState.DECOMMISSIONED, + .addTransition(NodeState.RUNNING, NodeState.DECOMMISSIONED, RMNodeEventType.DECOMMISSION, - new DeactivateNodeTransition(RMNodeState.DECOMMISSIONED)) - .addTransition(RMNodeState.RUNNING, RMNodeState.LOST, + new DeactivateNodeTransition(NodeState.DECOMMISSIONED)) + .addTransition(NodeState.RUNNING, NodeState.LOST, RMNodeEventType.EXPIRE, - new DeactivateNodeTransition(RMNodeState.LOST)) - .addTransition(RMNodeState.RUNNING, RMNodeState.REBOOTED, + new DeactivateNodeTransition(NodeState.LOST)) + .addTransition(NodeState.RUNNING, NodeState.REBOOTED, RMNodeEventType.REBOOTING, - new DeactivateNodeTransition(RMNodeState.REBOOTED)) - .addTransition(RMNodeState.RUNNING, RMNodeState.RUNNING, + new DeactivateNodeTransition(NodeState.REBOOTED)) + .addTransition(NodeState.RUNNING, NodeState.RUNNING, RMNodeEventType.CLEANUP_APP, new CleanUpAppTransition()) - .addTransition(RMNodeState.RUNNING, RMNodeState.RUNNING, + .addTransition(NodeState.RUNNING, NodeState.RUNNING, RMNodeEventType.CLEANUP_CONTAINER, new CleanUpContainerTransition()) - .addTransition(RMNodeState.RUNNING, RMNodeState.RUNNING, + .addTransition(NodeState.RUNNING, NodeState.RUNNING, RMNodeEventType.RECONNECTED, new ReconnectNodeTransition()) //Transitions from UNHEALTHY state - .addTransition(RMNodeState.UNHEALTHY, - EnumSet.of(RMNodeState.UNHEALTHY, RMNodeState.RUNNING), + .addTransition(NodeState.UNHEALTHY, + EnumSet.of(NodeState.UNHEALTHY, NodeState.RUNNING), RMNodeEventType.STATUS_UPDATE, new 
StatusUpdateWhenUnHealthyTransition()) - .addTransition(RMNodeState.UNHEALTHY, RMNodeState.UNHEALTHY, + .addTransition(NodeState.UNHEALTHY, NodeState.UNHEALTHY, RMNodeEventType.RECONNECTED, new ReconnectNodeTransition()) - .addTransition(RMNodeState.UNHEALTHY, RMNodeState.UNHEALTHY, + .addTransition(NodeState.UNHEALTHY, NodeState.UNHEALTHY, RMNodeEventType.CLEANUP_APP, new CleanUpAppTransition()) - .addTransition(RMNodeState.UNHEALTHY, RMNodeState.UNHEALTHY, + .addTransition(NodeState.UNHEALTHY, NodeState.UNHEALTHY, RMNodeEventType.CLEANUP_CONTAINER, new CleanUpContainerTransition()) // create the topology tables .installTopology(); - private final StateMachine stateMachine; public RMNodeImpl(NodeId nodeId, RMContext context, String hostName, @@ -252,7 +253,7 @@ public class RMNodeImpl implements RMNode, EventHandler { } @Override - public RMNodeState getState() { + public NodeState getState() { this.readLock.lock(); try { @@ -302,7 +303,7 @@ public class RMNodeImpl implements RMNode, EventHandler { LOG.debug("Processing " + event.getNodeId() + " of type " + event.getType()); try { writeLock.lock(); - RMNodeState oldState = getState(); + NodeState oldState = getState(); try { stateMachine.doTransition(event.getType(), event); } catch (InvalidStateTransitonException e) { @@ -321,7 +322,7 @@ public class RMNodeImpl implements RMNode, EventHandler { } } - private void updateMetricsForRejoinedNode(RMNodeState previousNodeState) { + private void updateMetricsForRejoinedNode(NodeState previousNodeState) { ClusterMetrics metrics = ClusterMetrics.getMetrics(); metrics.incrNumActiveNodes(); @@ -341,7 +342,7 @@ public class RMNodeImpl implements RMNode, EventHandler { } } - private void updateMetricsForDeactivatedNode(RMNodeState finalState) { + private void updateMetricsForDeactivatedNode(NodeState finalState) { ClusterMetrics metrics = ClusterMetrics.getMetrics(); metrics.decrNumActiveNodes(); @@ -440,8 +441,8 @@ public class RMNodeImpl implements RMNode, EventHandler { public static class DeactivateNodeTransition implements SingleArcTransition { - private final RMNodeState finalState; - public DeactivateNodeTransition(RMNodeState finalState) { + private final NodeState finalState; + public DeactivateNodeTransition(NodeState finalState) { this.finalState = finalState; } @@ -466,9 +467,9 @@ public class RMNodeImpl implements RMNode, EventHandler { } public static class StatusUpdateWhenHealthyTransition implements - MultipleArcTransition { + MultipleArcTransition { @Override - public RMNodeState transition(RMNodeImpl rmNode, RMNodeEvent event) { + public NodeState transition(RMNodeImpl rmNode, RMNodeEvent event) { RMNodeStatusEvent statusEvent = (RMNodeStatusEvent) event; @@ -486,8 +487,8 @@ public class RMNodeImpl implements RMNode, EventHandler { new NodesListManagerEvent( NodesListManagerEventType.NODE_UNUSABLE, rmNode)); // Update metrics - rmNode.updateMetricsForDeactivatedNode(RMNodeState.UNHEALTHY); - return RMNodeState.UNHEALTHY; + rmNode.updateMetricsForDeactivatedNode(NodeState.UNHEALTHY); + return NodeState.UNHEALTHY; } // Filter the map to only obtain just launched containers and finished @@ -541,15 +542,15 @@ public class RMNodeImpl implements RMNode, EventHandler { rmNode.containersToClean.clear(); rmNode.finishedApplications.clear(); - return RMNodeState.RUNNING; + return NodeState.RUNNING; } } public static class StatusUpdateWhenUnHealthyTransition implements - MultipleArcTransition { + MultipleArcTransition { @Override - public RMNodeState transition(RMNodeImpl rmNode, RMNodeEvent 
event) { + public NodeState transition(RMNodeImpl rmNode, RMNodeEvent event) { RMNodeStatusEvent statusEvent = (RMNodeStatusEvent) event; // Switch the last heartbeatresponse. @@ -566,11 +567,11 @@ public class RMNodeImpl implements RMNode, EventHandler { // notifiers get update metadata because they will very likely query it // upon notification // Update metrics - rmNode.updateMetricsForRejoinedNode(RMNodeState.UNHEALTHY); - return RMNodeState.RUNNING; + rmNode.updateMetricsForRejoinedNode(NodeState.UNHEALTHY); + return NodeState.RUNNING; } - return RMNodeState.UNHEALTHY; + return NodeState.UNHEALTHY; } } } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeState.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeState.java deleted file mode 100644 index c3f963c8282..00000000000 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeState.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.yarn.server.resourcemanager.rmnode; - -import org.apache.hadoop.yarn.api.records.NodeState; - -//TODO yarn.api.records.NodeState is a clone of RMNodeState made for MR-3353. 
-// In a subsequent patch RMNodeState should be replaced with NodeState -public enum RMNodeState { - NEW, RUNNING, UNHEALTHY, DECOMMISSIONED, LOST, REBOOTED; - - public static NodeState toNodeState(RMNodeState state) { - switch(state) { - case NEW: - return NodeState.NEW; - case RUNNING: - return NodeState.RUNNING; - case UNHEALTHY: - return NodeState.UNHEALTHY; - case DECOMMISSIONED: - return NodeState.DECOMMISSIONED; - case LOST: - return NodeState.LOST; - case REBOOTED: - return NodeState.REBOOTED; - } - return null; - } -}; diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java index 3cb42888cb1..a9aafc5dbb7 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java @@ -27,10 +27,10 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit; import java.util.Collection; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; -import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo; import org.apache.hadoop.yarn.util.Times; @@ -78,9 +78,9 @@ class NodesPage extends RmView { th(".mem", "Mem Avail"). _()._(). tbody(); - RMNodeState stateFilter = null; + NodeState stateFilter = null; if(type != null && !type.isEmpty()) { - stateFilter = RMNodeState.valueOf(type.toUpperCase()); + stateFilter = NodeState.valueOf(type.toUpperCase()); } Collection rmNodes = this.rmContext.getRMNodes().values(); boolean isInactive = false; @@ -96,14 +96,14 @@ class NodesPage extends RmView { } for (RMNode ni : rmNodes) { if(stateFilter != null) { - RMNodeState state = ni.getState(); + NodeState state = ni.getState(); if(!stateFilter.equals(state)) { continue; } } else { // No filter. User is asking for all nodes. Make sure you skip the // unhealthy nodes. 
- if (ni.getState() == RMNodeState.UNHEALTHY) { + if (ni.getState() == NodeState.UNHEALTHY) { continue; } } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java index 6d61d6f2aec..857367a8f45 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java @@ -39,13 +39,13 @@ import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; -import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; @@ -162,7 +162,7 @@ public class RMWebServices { Collection rmNodes = this.rm.getRMContext().getRMNodes().values(); boolean isInactive = false; if (filterState != null && !filterState.isEmpty()) { - RMNodeState nodeState = RMNodeState.valueOf(filterState.toUpperCase()); + NodeState nodeState = NodeState.valueOf(filterState.toUpperCase()); switch (nodeState) { case DECOMMISSIONED: case LOST: @@ -182,7 +182,7 @@ public class RMWebServices { } else { // No filter. User is asking for all nodes. Make sure you skip the // unhealthy nodes. 
- if (ni.getState() == RMNodeState.UNHEALTHY) { + if (ni.getState() == NodeState.UNHEALTHY) { continue; } } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java index facd73aef67..c8e7b9ba387 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java @@ -25,8 +25,8 @@ import javax.xml.bind.annotation.XmlTransient; import org.apache.hadoop.yarn.api.records.NodeHealthStatus; import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; -import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport; @@ -35,7 +35,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeRepo public class NodeInfo { protected String rack; - protected RMNodeState state; + protected NodeState state; protected String id; protected String nodeHostName; protected String nodeHTTPAddress; diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java index f30883f1bad..f1172e20de5 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java @@ -26,11 +26,11 @@ import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NodeHealthStatus; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; -import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState; import com.google.common.collect.Lists; @@ -48,7 +48,7 @@ public class MockNodes { for (int j = 0; j < nodesPerRack; ++j) { if (j == (nodesPerRack - 1)) { // One unhealthy node per rack. 
- list.add(nodeInfo(i, perNode, RMNodeState.UNHEALTHY)); + list.add(nodeInfo(i, perNode, NodeState.UNHEALTHY)); } list.add(newNodeInfo(i, perNode)); } @@ -61,7 +61,7 @@ public class MockNodes { List list = Lists.newArrayList(); for (int i = 0; i < racks; ++i) { for (int j = 0; j < nodesPerRack; ++j) { - RMNodeState[] allStates = RMNodeState.values(); + NodeState[] allStates = NodeState.values(); list.add(nodeInfo(i, perNode, allStates[j % allStates.length])); } } @@ -102,11 +102,11 @@ public class MockNodes { private Resource perNode; private String rackName; private NodeHealthStatus nodeHealthStatus; - private RMNodeState state; + private NodeState state; public MockRMNodeImpl(NodeId nodeId, String nodeAddr, String httpAddress, Resource perNode, String rackName, NodeHealthStatus nodeHealthStatus, - int cmdPort, String hostName, RMNodeState state) { + int cmdPort, String hostName, NodeState state) { this.nodeId = nodeId; this.nodeAddr = nodeAddr; this.httpAddress = httpAddress; @@ -169,7 +169,7 @@ public class MockNodes { } @Override - public RMNodeState getState() { + public NodeState getState() { return this.state; } @@ -189,11 +189,11 @@ public class MockNodes { } }; - private static RMNode buildRMNode(int rack, final Resource perNode, RMNodeState state, String httpAddr) { + private static RMNode buildRMNode(int rack, final Resource perNode, NodeState state, String httpAddr) { return buildRMNode(rack, perNode, state, httpAddr, NODE_ID++); } - private static RMNode buildRMNode(int rack, final Resource perNode, RMNodeState state, String httpAddr, int hostnum) { + private static RMNode buildRMNode(int rack, final Resource perNode, NodeState state, String httpAddr, int hostnum) { final String rackName = "rack"+ rack; final int nid = hostnum; final String hostName = "host"+ nid; @@ -202,7 +202,7 @@ public class MockNodes { final String httpAddress = httpAddr; final NodeHealthStatus nodeHealthStatus = recordFactory.newRecordInstance(NodeHealthStatus.class); - if (state != RMNodeState.UNHEALTHY) { + if (state != NodeState.UNHEALTHY) { nodeHealthStatus.setIsNodeHealthy(true); nodeHealthStatus.setHealthReport("HealthyMe"); } @@ -211,12 +211,12 @@ public class MockNodes { } public static RMNode nodeInfo(int rack, final Resource perNode, - RMNodeState state) { + NodeState state) { return buildRMNode(rack, perNode, state, "N/A"); } public static RMNode newNodeInfo(int rack, final Resource perNode) { - return buildRMNode(rack, perNode, RMNodeState.RUNNING, "localhost:0"); + return buildRMNode(rack, perNode, NodeState.RUNNING, "localhost:0"); } public static RMNode newNodeInfo(int rack, final Resource perNode, int hostnum) { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java index 75ae612165e..9f8633d5312 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java @@ -35,6 +35,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import 
org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher; import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory; @@ -48,7 +49,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl; -import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState; import org.apache.hadoop.yarn.util.Records; import org.apache.log4j.Level; import org.apache.log4j.LogManager; @@ -146,7 +146,7 @@ public class MockRM extends ResourceManager { node.handle(new RMNodeEvent(nm.getNodeId(), RMNodeEventType.EXPIRE)); } - public void NMwaitForState(NodeId nodeid, RMNodeState finalState) + public void NMwaitForState(NodeId nodeid, NodeState finalState) throws Exception { RMNode node = getRMContext().getRMNodes().get(nodeid); Assert.assertNotNull("node shouldn't be null", node); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRMRPCNodeUpdates.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRMRPCNodeUpdates.java index abffb060bac..d709beebebe 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRMRPCNodeUpdates.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRMRPCNodeUpdates.java @@ -33,7 +33,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockNM; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; -import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; import org.apache.hadoop.yarn.util.BuilderUtils; import org.junit.After; @@ -83,7 +82,7 @@ public class TestAMRMRPCNodeUpdates { private void syncNodeLost(MockNM nm) throws Exception { rm.sendNodeStarted(nm); - rm.NMwaitForState(nm.getNodeId(), RMNodeState.RUNNING); + rm.NMwaitForState(nm.getNodeId(), NodeState.RUNNING); rm.sendNodeLost(nm); dispatcher.await(); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java index 4922419c2f1..1979273ce12 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java 
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java @@ -20,9 +20,9 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp; import java.io.IOException; import java.io.PrintWriter; +import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; -import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState; import org.apache.hadoop.yarn.server.resourcemanager.webapp.NodesPage.NodesBlock; import org.apache.hadoop.yarn.webapp.test.WebAppTests; import org.junit.Before; @@ -44,7 +44,7 @@ public class TestNodesPage { // The following is because of the way TestRMWebApp.mockRMContext creates // nodes. final int numberOfLostNodesPerRack = numberOfNodesPerRack - / RMNodeState.values().length; + / NodeState.values().length; // Number of Actual Table Headers for NodesPage.NodesBlock might change in // future. In that case this value should be adjusted to the new value. diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java index 588930f6efb..713d0a9d73c 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java @@ -31,6 +31,7 @@ import java.util.concurrent.ConcurrentMap; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.MockNodes; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; @@ -41,7 +42,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; -import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; @@ -124,13 +124,13 @@ public class TestRMWebApp { // Unhealthy nodes instance.moreParams().put(YarnWebParams.NODE_STATE, - RMNodeState.UNHEALTHY.toString()); + NodeState.UNHEALTHY.toString()); instance.render(); WebAppTests.flushOutput(injector); // Lost nodes instance.moreParams().put(YarnWebParams.NODE_STATE, - RMNodeState.LOST.toString()); + NodeState.LOST.toString()); instance.render(); WebAppTests.flushOutput(injector); diff --git 
a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java index 828f5f6f252..cb4a3af5f59 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java @@ -32,13 +32,13 @@ import javax.xml.parsers.DocumentBuilderFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.NodeHealthStatus; +import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.server.resourcemanager.MockNM; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl; -import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeStatusEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport; @@ -54,7 +54,6 @@ import org.w3c.dom.Document; import org.w3c.dom.Element; import org.w3c.dom.NodeList; import org.xml.sax.InputSource; - import com.google.inject.Guice; import com.google.inject.Injector; import com.google.inject.servlet.GuiceServletContextListener; @@ -131,15 +130,15 @@ public class TestRMWebServicesNodes extends JerseyTest { MockNM nm1 = rm.registerNode("h1:1234", 5120); MockNM nm2 = rm.registerNode("h2:1235", 5121); rm.sendNodeStarted(nm1); - rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING); - rm.NMwaitForState(nm2.getNodeId(), RMNodeState.NEW); + rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING); + rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW); // One unhealthy node which should not appear in the list after // MAPREDUCE-3760. 
MockNM nm3 = rm.registerNode("h3:1236", 5122); - rm.NMwaitForState(nm3.getNodeId(), RMNodeState.NEW); + rm.NMwaitForState(nm3.getNodeId(), NodeState.NEW); rm.sendNodeStarted(nm3); - rm.NMwaitForState(nm3.getNodeId(), RMNodeState.RUNNING); + rm.NMwaitForState(nm3.getNodeId(), NodeState.RUNNING); RMNodeImpl node = (RMNodeImpl) rm.getRMContext().getRMNodes() .get(nm3.getNodeId()); NodeHealthStatus nodeHealth = node.getNodeHealthStatus(); @@ -147,7 +146,7 @@ public class TestRMWebServicesNodes extends JerseyTest { nodeHealth.setIsNodeHealthy(false); node.handle(new RMNodeStatusEvent(nm3.getNodeId(), nodeHealth, new ArrayList(), null, null)); - rm.NMwaitForState(nm3.getNodeId(), RMNodeState.UNHEALTHY); + rm.NMwaitForState(nm3.getNodeId(), NodeState.UNHEALTHY); ClientResponse response = r.path("ws").path("v1").path("cluster").path("nodes") @@ -169,11 +168,11 @@ public class TestRMWebServicesNodes extends JerseyTest { MockNM nm1 = rm.registerNode("h1:1234", 5120); MockNM nm2 = rm.registerNode("h2:1235", 5121); rm.sendNodeStarted(nm1); - rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING); - rm.NMwaitForState(nm2.getNodeId(), RMNodeState.NEW); + rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING); + rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW); ClientResponse response = r.path("ws").path("v1").path("cluster") - .path("nodes").queryParam("state", RMNodeState.RUNNING.toString()) + .path("nodes").queryParam("state", NodeState.RUNNING.toString()) .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); @@ -196,7 +195,7 @@ public class TestRMWebServicesNodes extends JerseyTest { ClientResponse response = r.path("ws").path("v1").path("cluster") .path("nodes") - .queryParam("state", RMNodeState.DECOMMISSIONED.toString()) + .queryParam("state", NodeState.DECOMMISSIONED.toString()) .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); JSONObject json = response.getEntity(JSONObject.class); @@ -231,7 +230,7 @@ public class TestRMWebServicesNodes extends JerseyTest { WebServicesTestUtils .checkStringMatch( "exception message", - "No enum const class org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState.BOGUSSTATE", + "No enum const class org.apache.hadoop.yarn.api.records.NodeState.BOGUSSTATE", message); WebServicesTestUtils.checkStringMatch("exception type", "IllegalArgumentException", type); @@ -250,13 +249,13 @@ public class TestRMWebServicesNodes extends JerseyTest { MockNM nm2 = rm.registerNode("h2:1234", 5120); rm.sendNodeStarted(nm1); rm.sendNodeStarted(nm2); - rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING); - rm.NMwaitForState(nm2.getNodeId(), RMNodeState.RUNNING); + rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING); + rm.NMwaitForState(nm2.getNodeId(), NodeState.RUNNING); rm.sendNodeLost(nm1); rm.sendNodeLost(nm2); ClientResponse response = r.path("ws").path("v1").path("cluster") - .path("nodes").queryParam("state", RMNodeState.LOST.toString()) + .path("nodes").queryParam("state", NodeState.LOST.toString()) .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); @@ -283,8 +282,8 @@ public class TestRMWebServicesNodes extends JerseyTest { MockNM nm2 = rm.registerNode("h2:1234", 5120); rm.sendNodeStarted(nm1); rm.sendNodeStarted(nm2); - rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING); - rm.NMwaitForState(nm2.getNodeId(), 
RMNodeState.RUNNING); + rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING); + rm.NMwaitForState(nm2.getNodeId(), NodeState.RUNNING); rm.sendNodeLost(nm1); rm.sendNodeLost(nm2); @@ -312,8 +311,8 @@ public class TestRMWebServicesNodes extends JerseyTest { MockNM nm1 = rm.registerNode("h1:1234", 5120); MockNM nm2 = rm.registerNode("h2:1235", 5121); rm.sendNodeStarted(nm1); - rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING); - rm.NMwaitForState(nm2.getNodeId(), RMNodeState.NEW); + rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING); + rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW); ClientResponse response = r.path("ws").path("v1").path("cluster") .path("nodes").queryParam("healthy", "true") .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); @@ -332,8 +331,8 @@ public class TestRMWebServicesNodes extends JerseyTest { MockNM nm1 = rm.registerNode("h1:1234", 5120); MockNM nm2 = rm.registerNode("h2:1235", 5121); rm.sendNodeStarted(nm1); - rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING); - rm.NMwaitForState(nm2.getNodeId(), RMNodeState.NEW); + rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING); + rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW); ClientResponse response = r.path("ws").path("v1").path("cluster") .path("nodes").queryParam("healthy", "TRUe") .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); @@ -353,8 +352,8 @@ public class TestRMWebServicesNodes extends JerseyTest { MockNM nm1 = rm.registerNode("h1:1234", 5120); MockNM nm2 = rm.registerNode("h2:1235", 5121); rm.sendNodeStarted(nm1); - rm.NMwaitForState(nm2.getNodeId(), RMNodeState.NEW); - rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING); + rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW); + rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING); RMNodeImpl node = (RMNodeImpl) rm.getRMContext().getRMNodes() .get(nm1.getNodeId()); NodeHealthStatus nodeHealth = node.getNodeHealthStatus(); @@ -362,11 +361,11 @@ public class TestRMWebServicesNodes extends JerseyTest { nodeHealth.setIsNodeHealthy(false); node.handle(new RMNodeStatusEvent(nm1.getNodeId(), nodeHealth, new ArrayList(), null, null)); - rm.NMwaitForState(nm1.getNodeId(), RMNodeState.UNHEALTHY); + rm.NMwaitForState(nm1.getNodeId(), NodeState.UNHEALTHY); ClientResponse response = r.path("ws").path("v1").path("cluster") .path("nodes").queryParam("healthy", "true") - .queryParam("state", RMNodeState.RUNNING.toString()) + .queryParam("state", NodeState.RUNNING.toString()) .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); JSONObject json = response.getEntity(JSONObject.class); @@ -380,8 +379,8 @@ public class TestRMWebServicesNodes extends JerseyTest { MockNM nm1 = rm.registerNode("h1:1234", 5120); MockNM nm2 = rm.registerNode("h2:1235", 5121); rm.sendNodeStarted(nm1); - rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING); - rm.NMwaitForState(nm2.getNodeId(), RMNodeState.NEW); + rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING); + rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW); ClientResponse response = r.path("ws").path("v1").path("cluster") .path("nodes").queryParam("healthy", "false") .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); From a701c792f880c43ba807f00a92a99dadf89eab0c Mon Sep 17 00:00:00 2001 From: Todd Lipcon Date: Fri, 4 May 2012 18:50:54 +0000 Subject: [PATCH 16/70] HDFS-3357. DataXceiver reads from client socket with incorrect/no timeout. Contributed by Todd Lipcon. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1334116 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../hdfs/server/datanode/DataXceiver.java | 24 ++- .../server/datanode/DataXceiverServer.java | 4 +- .../hdfs/TestDataTransferKeepalive.java | 159 ++++++++++++++++++ 4 files changed, 183 insertions(+), 7 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 7d53324acc8..fd629ae49fd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -597,6 +597,9 @@ Release 2.0.0 - UNRELEASED HDFS-3350. In INode, add final to compareTo(..), equals(..) and hashCode(), and remove synchronized from updatePermissionStatus(..). (szetszwo) + HDFS-3357. DataXceiver reads from client socket with incorrect/no timeout + (todd) + BREAKDOWN OF HDFS-1623 SUBTASKS HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java index 995840066db..4114d7f9cc7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java @@ -60,6 +60,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.net.SocketInputWrapper; import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.DataChecksum; @@ -83,13 +84,24 @@ class DataXceiver extends Receiver implements Runnable { private final DataXceiverServer dataXceiverServer; private long opStartTime; //the start time of receiving an Op + private final SocketInputWrapper socketInputWrapper; - public DataXceiver(Socket s, DataNode datanode, + public static DataXceiver create(Socket s, DataNode dn, + DataXceiverServer dataXceiverServer) throws IOException { + + SocketInputWrapper iw = NetUtils.getInputStream(s); + return new DataXceiver(s, iw, dn, dataXceiverServer); + } + + private DataXceiver(Socket s, + SocketInputWrapper socketInput, + DataNode datanode, DataXceiverServer dataXceiverServer) throws IOException { super(new DataInputStream(new BufferedInputStream( - NetUtils.getInputStream(s), HdfsConstants.SMALL_BUFFER_SIZE))); + socketInput, HdfsConstants.SMALL_BUFFER_SIZE))); this.s = s; + this.socketInputWrapper = socketInput; this.isLocal = s.getInetAddress().equals(s.getLocalAddress()); this.datanode = datanode; this.dnConf = datanode.getDnConf(); @@ -128,8 +140,6 @@ class DataXceiver extends Receiver implements Runnable { Op op = null; dataXceiverServer.childSockets.add(s); try { - int stdTimeout = s.getSoTimeout(); - // We process requests in a loop, and stay around for a short timeout. // This optimistic behaviour allows the other end to reuse connections. // Setting keepalive timeout to 0 disable this behavior. 
@@ -139,7 +149,9 @@ class DataXceiver extends Receiver implements Runnable { try { if (opsProcessed != 0) { assert dnConf.socketKeepaliveTimeout > 0; - s.setSoTimeout(dnConf.socketKeepaliveTimeout); + socketInputWrapper.setTimeout(dnConf.socketKeepaliveTimeout); + } else { + socketInputWrapper.setTimeout(dnConf.socketTimeout); } op = readOp(); } catch (InterruptedIOException ignored) { @@ -160,7 +172,7 @@ class DataXceiver extends Receiver implements Runnable { // restore normal timeout if (opsProcessed != 0) { - s.setSoTimeout(stdTimeout); + s.setSoTimeout(dnConf.socketTimeout); } opStartTime = now(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java index f32b2968f52..bb0f7fd81b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java @@ -135,6 +135,7 @@ class DataXceiverServer implements Runnable { try { s = ss.accept(); s.setTcpNoDelay(true); + // Timeouts are set within DataXceiver.run() // Make sure the xceiver count is not exceeded int curXceiverCount = datanode.getXceiverCount(); @@ -144,7 +145,8 @@ class DataXceiverServer implements Runnable { + maxXceiverCount); } - new Daemon(datanode.threadGroup, new DataXceiver(s, datanode, this)) + new Daemon(datanode.threadGroup, + DataXceiver.create(s, datanode, this)) .start(); } catch (SocketTimeoutException ignored) { // wake up to see if should continue to run diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java new file mode 100644 index 00000000000..852f3c6801a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java @@ -0,0 +1,159 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs; + +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY; +import static org.junit.Assert.*; + +import java.io.PrintWriter; +import java.net.InetSocketAddress; +import java.net.Socket; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; +import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; +import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.util.ReflectionUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +public class TestDataTransferKeepalive { + Configuration conf = new HdfsConfiguration(); + private MiniDFSCluster cluster; + private FileSystem fs; + private InetSocketAddress dnAddr; + private DataNode dn; + private DFSClient dfsClient; + private static Path TEST_FILE = new Path("/test"); + + private static final int KEEPALIVE_TIMEOUT = 1000; + private static final int WRITE_TIMEOUT = 3000; + + @Before + public void setup() throws Exception { + conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY, + KEEPALIVE_TIMEOUT); + + cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(1).build(); + fs = cluster.getFileSystem(); + dfsClient = ((DistributedFileSystem)fs).dfs; + + String poolId = cluster.getNamesystem().getBlockPoolId(); + dn = cluster.getDataNodes().get(0); + DatanodeRegistration dnReg = DataNodeTestUtils.getDNRegistrationForBP( + dn, poolId); + dnAddr = NetUtils.createSocketAddr(dnReg.getXferAddr()); + } + + @After + public void teardown() { + cluster.shutdown(); + } + + /** + * Regression test for HDFS-3357. Check that the datanode is respecting + * its configured keepalive timeout. + */ + @Test(timeout=30000) + public void testKeepaliveTimeouts() throws Exception { + DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L); + + // Clients that write aren't currently re-used. + assertEquals(0, dfsClient.socketCache.size()); + assertXceiverCount(0); + + // Reads the file, so we should get a + // cached socket, and should have an xceiver on the other side. + DFSTestUtil.readFile(fs, TEST_FILE); + assertEquals(1, dfsClient.socketCache.size()); + assertXceiverCount(1); + + // Sleep for a bit longer than the keepalive timeout + // and make sure the xceiver died. + Thread.sleep(KEEPALIVE_TIMEOUT * 2); + assertXceiverCount(0); + + // The socket is still in the cache, because we don't + // notice that it's closed until we try to read + // from it again. + assertEquals(1, dfsClient.socketCache.size()); + + // Take it out of the cache - reading should + // give an EOF. + Socket s = dfsClient.socketCache.get(dnAddr); + assertNotNull(s); + assertEquals(-1, NetUtils.getInputStream(s).read()); + } + + /** + * Test for the case where the client beings to read a long block, but doesn't + * read bytes off the stream quickly. The datanode should time out sending the + * chunks and the transceiver should die, even if it has a long keepalive. + */ + @Test(timeout=30000) + public void testSlowReader() throws Exception { + // Restart the DN with a shorter write timeout. 
+ DataNodeProperties props = cluster.stopDataNode(0); + props.conf.setInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, + WRITE_TIMEOUT); + props.conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY, + 120000); + assertTrue(cluster.restartDataNode(props, true)); + // Wait for heartbeats to avoid a startup race where we + // try to write the block while the DN is still starting. + cluster.triggerHeartbeats(); + + dn = cluster.getDataNodes().get(0); + + DFSTestUtil.createFile(fs, TEST_FILE, 1024*1024*8L, (short)1, 0L); + FSDataInputStream stm = fs.open(TEST_FILE); + try { + stm.read(); + assertXceiverCount(1); + + Thread.sleep(WRITE_TIMEOUT + 1000); + // DN should time out in sendChunks, and this should force + // the xceiver to exit. + assertXceiverCount(0); + } finally { + IOUtils.closeStream(stm); + } + } + + private void assertXceiverCount(int expected) { + // Subtract 1, since the DataXceiverServer + // counts as one + int count = dn.getXceiverCount() - 1; + if (count != expected) { + ReflectionUtils.printThreadInfo( + new PrintWriter(System.err), + "Thread dumps"); + fail("Expected " + expected + " xceivers, found " + + count); + } + } +} From 0bf8f11860e68f615260543514d0d604d523efe8 Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Fri, 4 May 2012 20:16:23 +0000 Subject: [PATCH 17/70] HDFS-3303. Remove Writable implementation from RemoteEditLogManifest. Contributed by Brandon Li git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1334156 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../protocol/RemoteEditLogManifest.java | 29 +------------------ 2 files changed, 4 insertions(+), 28 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index fd629ae49fd..a75a5f8bdb3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -419,6 +419,9 @@ Release 2.0.0 - UNRELEASED HDFS-3339. Change INode to package private. (John George via szetszwo) + HDFS-3303. Remove Writable implementation from RemoteEditLogManifest. + (Brandon Li via szetszwo) + OPTIMIZATIONS HDFS-3024. Improve performance of stringification in addStoredBlock (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLogManifest.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLogManifest.java index f871828f840..60e41a615e7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLogManifest.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLogManifest.java @@ -17,22 +17,16 @@ */ package org.apache.hadoop.hdfs.server.protocol; -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; import java.util.Collections; import java.util.List; -import org.apache.hadoop.io.Writable; - import com.google.common.base.Joiner; import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; /** * An enumeration of logs available on a remote NameNode. 
*/ -public class RemoteEditLogManifest implements Writable { +public class RemoteEditLogManifest { private List logs; @@ -75,25 +69,4 @@ public class RemoteEditLogManifest implements Writable { public String toString() { return "[" + Joiner.on(", ").join(logs) + "]"; } - - - @Override - public void write(DataOutput out) throws IOException { - out.writeInt(logs.size()); - for (RemoteEditLog log : logs) { - log.write(out); - } - } - - @Override - public void readFields(DataInput in) throws IOException { - int numLogs = in.readInt(); - logs = Lists.newArrayList(); - for (int i = 0; i < numLogs; i++) { - RemoteEditLog log = new RemoteEditLog(); - log.readFields(in); - logs.add(log); - } - checkState(); - } } From 5dbbe0e0a5d31689d3425e490865f95057dc051c Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Fri, 4 May 2012 21:58:44 +0000 Subject: [PATCH 18/70] HDFS-2617. Replaced Kerberized SSL for image transfer and fsck with SPNEGO-based solution. Contributed by Jakob Homan, Alejandro Abdelnur, and Aaron T. Myers git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1334216 13f79535-47bb-0310-9956-ffa450edef68 --- .../org/apache/hadoop/http/HttpServer.java | 40 +-- .../Krb5AndCertsSslSocketConnector.java | 232 ------------------ .../apache/hadoop/security/SecurityUtil.java | 110 ++------- .../packages/templates/conf/hdfs-site.xml | 7 - hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 4 +- .../apache/hadoop/hdfs/HftpFileSystem.java | 12 +- .../hdfs/server/namenode/GetImageServlet.java | 44 +--- .../hadoop/hdfs/server/namenode/NameNode.java | 7 +- .../server/namenode/NameNodeHttpServer.java | 196 +++++++-------- .../server/namenode/SecondaryNameNode.java | 96 ++++---- .../hdfs/server/namenode/TransferFsImage.java | 16 +- .../server/namenode/ha/BootstrapStandby.java | 3 +- .../namenode/ha/StandbyCheckpointer.java | 2 +- .../apache/hadoop/hdfs/tools/DFSAdmin.java | 2 +- .../org/apache/hadoop/hdfs/tools/DFSck.java | 15 +- .../hdfs/tools/DelegationTokenFetcher.java | 15 +- .../src/main/resources/hdfs-default.xml | 11 + 18 files changed, 213 insertions(+), 602 deletions(-) delete mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Krb5AndCertsSslSocketConnector.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java index ab3e5999e5b..be4f26fbf2b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java @@ -52,8 +52,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.jmx.JMXJsonServlet; import org.apache.hadoop.log.LogLevel; import org.apache.hadoop.metrics.MetricsServlet; -import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector; -import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector.MODE; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.util.ReflectionUtils; @@ -99,6 +97,7 @@ public class HttpServer implements FilterContainer { // gets stored. 
public static final String CONF_CONTEXT_ATTRIBUTE = "hadoop.conf"; static final String ADMINS_ACL = "admins.acl"; + public static final String SPNEGO_FILTER = "SpnegoFilter"; public static final String BIND_ADDRESS = "bind.address"; @@ -237,11 +236,7 @@ public class HttpServer implements FilterContainer { webServer.addHandler(webAppContext); addDefaultApps(contexts, appDir, conf); - - defineFilter(webAppContext, "krb5Filter", - Krb5AndCertsSslSocketConnector.Krb5SslFilter.class.getName(), - null, null); - + addGlobalFilter("safety", QuotingInputFilter.class.getName(), null); final FilterInitializer[] initializers = getFilterInitializers(conf); if (initializers != null) { @@ -424,12 +419,13 @@ public class HttpServer implements FilterContainer { * protect with Kerberos authentication. * Note: This method is to be used for adding servlets that facilitate * internal communication and not for user facing functionality. For - * servlets added using this method, filters (except internal Kerberized + + * servlets added using this method, filters (except internal Kerberos * filters) are not enabled. * * @param name The name of the servlet (can be passed as null) * @param pathSpec The path spec for the servlet * @param clazz The servlet class + * @param requireAuth Require Kerberos authenticate to access servlet */ public void addInternalServlet(String name, String pathSpec, Class clazz, boolean requireAuth) { @@ -440,11 +436,11 @@ public class HttpServer implements FilterContainer { webAppContext.addServlet(holder, pathSpec); if(requireAuth && UserGroupInformation.isSecurityEnabled()) { - LOG.info("Adding Kerberos filter to " + name); + LOG.info("Adding Kerberos (SPNEGO) filter to " + name); ServletHandler handler = webAppContext.getServletHandler(); FilterMapping fmap = new FilterMapping(); fmap.setPathSpec(pathSpec); - fmap.setFilterName("krb5Filter"); + fmap.setFilterName(SPNEGO_FILTER); fmap.setDispatches(Handler.ALL); handler.addFilterMapping(fmap); } @@ -580,26 +576,14 @@ public class HttpServer implements FilterContainer { webServer.addConnector(sslListener); } - /** - * Configure an ssl listener on the server. - * @param addr address to listen on - * @param sslConf conf to retrieve ssl options - * @param needClientAuth whether client authentication is required - */ - public void addSslListener(InetSocketAddress addr, Configuration sslConf, - boolean needClientAuth) throws IOException { - addSslListener(addr, sslConf, needClientAuth, false); - } - /** * Configure an ssl listener on the server. 
* @param addr address to listen on * @param sslConf conf to retrieve ssl options * @param needCertsAuth whether x509 certificate authentication is required - * @param needKrbAuth whether to allow kerberos auth */ public void addSslListener(InetSocketAddress addr, Configuration sslConf, - boolean needCertsAuth, boolean needKrbAuth) throws IOException { + boolean needCertsAuth) throws IOException { if (webServer.isStarted()) { throw new IOException("Failed to add ssl listener"); } @@ -612,15 +596,7 @@ public class HttpServer implements FilterContainer { System.setProperty("javax.net.ssl.trustStoreType", sslConf.get( "ssl.server.truststore.type", "jks")); } - Krb5AndCertsSslSocketConnector.MODE mode; - if(needCertsAuth && needKrbAuth) - mode = MODE.BOTH; - else if (!needCertsAuth && needKrbAuth) - mode = MODE.KRB; - else // Default to certificates - mode = MODE.CERTS; - - SslSocketConnector sslListener = new Krb5AndCertsSslSocketConnector(mode); + SslSocketConnector sslListener = new SslSocketConnector(); sslListener.setHost(addr.getHostName()); sslListener.setPort(addr.getPort()); sslListener.setKeystore(sslConf.get("ssl.server.keystore.location")); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Krb5AndCertsSslSocketConnector.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Krb5AndCertsSslSocketConnector.java deleted file mode 100644 index 625cad52d35..00000000000 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Krb5AndCertsSslSocketConnector.java +++ /dev/null @@ -1,232 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -package org.apache.hadoop.security; - -import java.io.IOException; -import java.net.InetAddress; -import java.net.ServerSocket; -import java.security.Principal; -import java.util.Collections; -import java.util.List; -import java.util.Random; - -import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLServerSocket; -import javax.net.ssl.SSLServerSocketFactory; -import javax.net.ssl.SSLSocket; -import javax.security.auth.kerberos.KerberosPrincipal; -import javax.servlet.Filter; -import javax.servlet.FilterChain; -import javax.servlet.FilterConfig; -import javax.servlet.ServletException; -import javax.servlet.ServletRequest; -import javax.servlet.ServletResponse; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletRequestWrapper; -import javax.servlet.http.HttpServletResponse; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.mortbay.io.EndPoint; -import org.mortbay.jetty.HttpSchemes; -import org.mortbay.jetty.Request; -import org.mortbay.jetty.security.ServletSSL; -import org.mortbay.jetty.security.SslSocketConnector; - -/** - * Extend Jetty's {@link SslSocketConnector} to optionally also provide - * Kerberos5ized SSL sockets. The only change in behavior from superclass - * is that we no longer honor requests to turn off NeedAuthentication when - * running with Kerberos support. - */ -public class Krb5AndCertsSslSocketConnector extends SslSocketConnector { - public static final List KRB5_CIPHER_SUITES = - Collections.unmodifiableList(Collections.singletonList( - "TLS_KRB5_WITH_3DES_EDE_CBC_SHA")); - static { - SecurityUtil.initKrb5CipherSuites(); - } - - private static final Log LOG = LogFactory - .getLog(Krb5AndCertsSslSocketConnector.class); - - private static final String REMOTE_PRINCIPAL = "remote_principal"; - - public enum MODE {KRB, CERTS, BOTH} // Support Kerberos, certificates or both? - - private final boolean useKrb; - private final boolean useCerts; - - public Krb5AndCertsSslSocketConnector() { - super(); - useKrb = true; - useCerts = false; - - setPasswords(); - } - - public Krb5AndCertsSslSocketConnector(MODE mode) { - super(); - useKrb = mode == MODE.KRB || mode == MODE.BOTH; - useCerts = mode == MODE.CERTS || mode == MODE.BOTH; - setPasswords(); - logIfDebug("useKerb = " + useKrb + ", useCerts = " + useCerts); - } - - // If not using Certs, set passwords to random gibberish or else - // Jetty will actually prompt the user for some. - private void setPasswords() { - if(!useCerts) { - Random r = new Random(); - System.setProperty("jetty.ssl.password", String.valueOf(r.nextLong())); - System.setProperty("jetty.ssl.keypassword", String.valueOf(r.nextLong())); - } - } - - @Override - protected SSLServerSocketFactory createFactory() throws Exception { - if(useCerts) - return super.createFactory(); - - SSLContext context = super.getProvider()==null - ? 
SSLContext.getInstance(super.getProtocol()) - :SSLContext.getInstance(super.getProtocol(), super.getProvider()); - context.init(null, null, null); - - return context.getServerSocketFactory(); - } - - /* (non-Javadoc) - * @see org.mortbay.jetty.security.SslSocketConnector#newServerSocket(java.lang.String, int, int) - */ - @Override - protected ServerSocket newServerSocket(String host, int port, int backlog) - throws IOException { - logIfDebug("Creating new KrbServerSocket for: " + host); - SSLServerSocket ss = null; - - if(useCerts) // Get the server socket from the SSL super impl - ss = (SSLServerSocket)super.newServerSocket(host, port, backlog); - else { // Create a default server socket - try { - ss = (SSLServerSocket)(host == null - ? createFactory().createServerSocket(port, backlog) : - createFactory().createServerSocket(port, backlog, InetAddress.getByName(host))); - } catch (Exception e) - { - LOG.warn("Could not create KRB5 Listener", e); - throw new IOException("Could not create KRB5 Listener: " + e.toString()); - } - } - - // Add Kerberos ciphers to this socket server if needed. - if(useKrb) { - ss.setNeedClientAuth(true); - String [] combined; - if(useCerts) { // combine the cipher suites - String[] certs = ss.getEnabledCipherSuites(); - combined = new String[certs.length + KRB5_CIPHER_SUITES.size()]; - System.arraycopy(certs, 0, combined, 0, certs.length); - System.arraycopy(KRB5_CIPHER_SUITES.toArray(new String[0]), 0, combined, - certs.length, KRB5_CIPHER_SUITES.size()); - } else { // Just enable Kerberos auth - combined = KRB5_CIPHER_SUITES.toArray(new String[0]); - } - - ss.setEnabledCipherSuites(combined); - } - - return ss; - }; - - @Override - public void customize(EndPoint endpoint, Request request) throws IOException { - if(useKrb) { // Add Kerberos-specific info - SSLSocket sslSocket = (SSLSocket)endpoint.getTransport(); - Principal remotePrincipal = sslSocket.getSession().getPeerPrincipal(); - logIfDebug("Remote principal = " + remotePrincipal); - request.setScheme(HttpSchemes.HTTPS); - request.setAttribute(REMOTE_PRINCIPAL, remotePrincipal); - - if(!useCerts) { // Add extra info that would have been added by super - String cipherSuite = sslSocket.getSession().getCipherSuite(); - Integer keySize = Integer.valueOf(ServletSSL.deduceKeyLength(cipherSuite));; - - request.setAttribute("javax.servlet.request.cipher_suite", cipherSuite); - request.setAttribute("javax.servlet.request.key_size", keySize); - } - } - - if(useCerts) super.customize(endpoint, request); - } - - private void logIfDebug(String s) { - if(LOG.isDebugEnabled()) - LOG.debug(s); - } - - /** - * Filter that takes the Kerberos principal identified in the - * {@link Krb5AndCertsSslSocketConnector} and provides it the to the servlet - * at runtime, setting the principal and short name. - */ - public static class Krb5SslFilter implements Filter { - @Override - public void doFilter(ServletRequest req, ServletResponse resp, - FilterChain chain) throws IOException, ServletException { - final Principal princ = - (Principal)req.getAttribute(Krb5AndCertsSslSocketConnector.REMOTE_PRINCIPAL); - - if(princ == null || !(princ instanceof KerberosPrincipal)) { - // Should never actually get here, since should be rejected at socket - // level. 
- LOG.warn("User not authenticated via kerberos from " + req.getRemoteAddr()); - ((HttpServletResponse)resp).sendError(HttpServletResponse.SC_FORBIDDEN, - "User not authenticated via Kerberos"); - return; - } - - // Provide principal information for servlet at runtime - ServletRequest wrapper = - new HttpServletRequestWrapper((HttpServletRequest) req) { - @Override - public Principal getUserPrincipal() { - return princ; - } - - /* - * Return the full name of this remote user. - * @see javax.servlet.http.HttpServletRequestWrapper#getRemoteUser() - */ - @Override - public String getRemoteUser() { - return princ.getName(); - } - }; - - chain.doFilter(wrapper, resp); - } - - @Override - public void init(FilterConfig arg0) throws ServletException { - /* Nothing to do here */ - } - - @Override - public void destroy() { /* Nothing to do here */ } - } -} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java index 63683bf7209..8189cfdb279 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java @@ -17,14 +17,11 @@ package org.apache.hadoop.security; import java.io.IOException; -import java.lang.reflect.Constructor; -import java.lang.reflect.Field; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.URI; import java.net.URL; +import java.net.URLConnection; import java.net.UnknownHostException; import java.security.AccessController; import java.security.PrivilegedAction; @@ -45,6 +42,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.io.Text; import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.authentication.client.AuthenticatedURL; +import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenInfo; @@ -134,79 +133,6 @@ public class SecurityUtil { return isTGSPrincipal(ticket.getServer()); } - /** - * Explicitly pull the service ticket for the specified host. This solves a - * problem with Java's Kerberos SSL problem where the client cannot - * authenticate against a cross-realm service. It is necessary for clients - * making kerberized https requests to call this method on the target URL - * to ensure that in a cross-realm environment the remote host will be - * successfully authenticated. - * - * This method is internal to Hadoop and should not be used by other - * applications. This method should not be considered stable or open: - * it will be removed when the Java behavior is changed. 
- * - * @param remoteHost Target URL the krb-https client will access - * @throws IOException if the service ticket cannot be retrieved - */ - public static void fetchServiceTicket(URL remoteHost) throws IOException { - if(!UserGroupInformation.isSecurityEnabled()) - return; - - String serviceName = "host/" + remoteHost.getHost(); - if (LOG.isDebugEnabled()) - LOG.debug("Fetching service ticket for host at: " + serviceName); - Object serviceCred = null; - Method credsToTicketMeth; - Class krb5utilClass; - try { - Class principalClass; - Class credentialsClass; - - if (System.getProperty("java.vendor").contains("IBM")) { - principalClass = Class.forName("com.ibm.security.krb5.PrincipalName"); - - credentialsClass = Class.forName("com.ibm.security.krb5.Credentials"); - krb5utilClass = Class.forName("com.ibm.security.jgss.mech.krb5"); - } else { - principalClass = Class.forName("sun.security.krb5.PrincipalName"); - credentialsClass = Class.forName("sun.security.krb5.Credentials"); - krb5utilClass = Class.forName("sun.security.jgss.krb5.Krb5Util"); - } - @SuppressWarnings("rawtypes") - Constructor principalConstructor = principalClass.getConstructor(String.class, - int.class); - Field KRB_NT_SRV_HST = principalClass.getDeclaredField("KRB_NT_SRV_HST"); - Method acquireServiceCredsMeth = - credentialsClass.getDeclaredMethod("acquireServiceCreds", - String.class, credentialsClass); - Method ticketToCredsMeth = krb5utilClass.getDeclaredMethod("ticketToCreds", - KerberosTicket.class); - credsToTicketMeth = krb5utilClass.getDeclaredMethod("credsToTicket", - credentialsClass); - - Object principal = principalConstructor.newInstance(serviceName, - KRB_NT_SRV_HST.get(principalClass)); - - serviceCred = acquireServiceCredsMeth.invoke(credentialsClass, - principal.toString(), - ticketToCredsMeth.invoke(krb5utilClass, getTgtFromSubject())); - } catch (Exception e) { - throw new IOException("Can't get service ticket for: " - + serviceName, e); - } - if (serviceCred == null) { - throw new IOException("Can't get service ticket for " + serviceName); - } - try { - Subject.getSubject(AccessController.getContext()).getPrivateCredentials() - .add(credsToTicketMeth.invoke(krb5utilClass, serviceCred)); - } catch (Exception e) { - throw new IOException("Can't get service ticket for: " - + serviceName, e); - } - } - /** * Convert Kerberos principal name pattern to valid Kerberos principal * names. It replaces hostname pattern with hostname, which should be @@ -513,6 +439,30 @@ public class SecurityUtil { } } + /** + * Open a (if need be) secure connection to a URL in a secure environment + * that is using SPNEGO to authenticate its URLs. All Namenode and Secondary + * Namenode URLs that are protected via SPNEGO should be accessed via this + * method. + * + * @param url to authenticate via SPNEGO. + * @return A connection that has been authenticated via SPNEGO + * @throws IOException If unable to authenticate via SPNEGO + */ + public static URLConnection openSecureHttpConnection(URL url) throws IOException { + if(!UserGroupInformation.isSecurityEnabled()) { + return url.openConnection(); + } + + AuthenticatedURL.Token token = new AuthenticatedURL.Token(); + try { + return new AuthenticatedURL().openConnection(url, token); + } catch (AuthenticationException e) { + throw new IOException("Exception trying to open authenticated connection to " + + url, e); + } + } + /** * Resolves a host subject to the security requirements determined by * hadoop.security.token.service.use_ip. 
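
The helper added above is the client-side replacement for the reflective fetchServiceTicket() code this patch deletes. A minimal usage sketch, with a placeholder host, port, and query string (the real callers in this patch build the address from DFSUtil.getInfoServer and their own servlet paths):

    import java.io.IOException;
    import java.io.InputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;

    import org.apache.hadoop.security.SecurityUtil;

    public class SpnegoClientSketch {
      public static void main(String[] args) throws IOException {
        URL url = new URL("http://namenode.example.com:50070/fsck?path=/"); // placeholder address
        HttpURLConnection conn =
            (HttpURLConnection) SecurityUtil.openSecureHttpConnection(url);
        if (conn.getResponseCode() != HttpURLConnection.HTTP_OK) {
          throw new IOException("Request failed: " + conn.getResponseMessage());
        }
        InputStream in = conn.getInputStream();
        try {
          // ... consume the servlet's response ...
        } finally {
          in.close();
        }
      }
    }

When security is disabled the helper simply returns url.openConnection(), so the same code path serves both secure and insecure clusters.
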
@@ -664,10 +614,4 @@ public class SecurityUtil { } } - public static void initKrb5CipherSuites() { - if (UserGroupInformation.isSecurityEnabled()) { - System.setProperty("https.cipherSuites", - Krb5AndCertsSslSocketConnector.KRB5_CIPHER_SUITES.get(0)); - } - } } diff --git a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hdfs-site.xml b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hdfs-site.xml index ee91f9e73fb..81afa95bae1 100644 --- a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hdfs-site.xml +++ b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hdfs-site.xml @@ -128,13 +128,6 @@ - - dfs.secondary.https.port - 50490 - The https port where secondary-namenode binds - - - dfs.datanode.kerberos.principal dn/_HOST@${local.realm} diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index a75a5f8bdb3..305d33cb5f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -435,6 +435,9 @@ Release 2.0.0 - UNRELEASED HDFS-2476. More CPU efficient data structure for under-replicated, over-replicated, and invalidated blocks. (Tomasz Nykiel via todd) + HDFS-2617. Replaced Kerberized SSL for image transfer and fsck + with SPNEGO-based solution. (jghoman, tucu, and atm via eli) + BUG FIXES HDFS-2481. Unknown protocol: org.apache.hadoop.hdfs.protocol.ClientProtocol. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 1b42351bc14..41d4960d7e9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -319,10 +319,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_DATANODE_USER_NAME_KEY = "dfs.datanode.kerberos.principal"; public static final String DFS_NAMENODE_KEYTAB_FILE_KEY = "dfs.namenode.keytab.file"; public static final String DFS_NAMENODE_USER_NAME_KEY = "dfs.namenode.kerberos.principal"; - public static final String DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY = "dfs.namenode.kerberos.https.principal"; + public static final String DFS_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY = "dfs.namenode.kerberos.internal.spnego.principal"; public static final String DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY = "dfs.secondary.namenode.keytab.file"; public static final String DFS_SECONDARY_NAMENODE_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.principal"; - public static final String DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.https.principal"; + public static final String DFS_SECONDARY_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.internal.spnego.principal"; public static final String DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY = "dfs.namenode.name.cache.threshold"; public static final int DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT = 10; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java index befa58c56ab..989fc123004 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java @@ -144,7 +144,7 @@ public class HftpFileSystem extends FileSystem } protected URI getNamenodeSecureUri(URI uri) { - return DFSUtil.createUri("https", getNamenodeSecureAddr(uri)); + return DFSUtil.createUri("http", getNamenodeSecureAddr(uri)); } @Override @@ -247,7 +247,7 @@ public class HftpFileSystem extends FileSystem c = DelegationTokenFetcher.getDTfromRemote(nnHttpUrl, renewer); } catch (Exception e) { LOG.info("Couldn't get a delegation token from " + nnHttpUrl + - " using https."); + " using http."); if(LOG.isDebugEnabled()) { LOG.debug("error was ", e); } @@ -686,11 +686,11 @@ public class HftpFileSystem extends FileSystem Configuration conf) throws IOException { // update the kerberos credentials, if they are coming from a keytab UserGroupInformation.getLoginUser().reloginFromKeytab(); - // use https to renew the token + // use http to renew the token InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token); return DelegationTokenFetcher.renewDelegationToken - (DFSUtil.createUri("https", serviceAddr).toString(), + (DFSUtil.createUri("http", serviceAddr).toString(), (Token) token); } @@ -700,10 +700,10 @@ public class HftpFileSystem extends FileSystem Configuration conf) throws IOException { // update the kerberos credentials, if they are coming from a keytab UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab(); - // use https to cancel the token + // use http to cancel the token InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token); DelegationTokenFetcher.cancelDelegationToken - (DFSUtil.createUri("https", serviceAddr).toString(), + (DFSUtil.createUri("http", serviceAddr).toString(), (Token) token); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java index 3b29a70f342..eab5f71bad0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java @@ -27,6 +27,8 @@ import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; + +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.security.SecurityUtil; import org.apache.commons.logging.Log; @@ -34,7 +36,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.server.common.JspHelper; @@ -83,11 +84,11 @@ public class GetImageServlet extends HttpServlet { (Configuration)getServletContext().getAttribute(JspHelper.CURRENT_CONF); if(UserGroupInformation.isSecurityEnabled() && - !isValidRequestor(request.getRemoteUser(), conf)) { + !isValidRequestor(request.getUserPrincipal().getName(), conf)) { response.sendError(HttpServletResponse.SC_FORBIDDEN, "Only Namenode and Secondary Namenode may access this servlet"); LOG.warn("Received non-NN/SNN request for image or edits from " - + request.getRemoteHost()); + + request.getUserPrincipal().getName() + " at " + request.getRemoteHost()); return; } @@ 
-156,15 +157,10 @@ public class GetImageServlet extends HttpServlet { } // issue a HTTP get request to download the new fsimage - MD5Hash downloadImageDigest = reloginIfNecessary().doAs( - new PrivilegedExceptionAction() { - @Override - public MD5Hash run() throws Exception { - return TransferFsImage.downloadImageToStorage( + MD5Hash downloadImageDigest = + TransferFsImage.downloadImageToStorage( parsedParams.getInfoServer(), txid, nnImage.getStorage(), true); - } - }); nnImage.saveDigestAndRenameCheckpointImage(txid, downloadImageDigest); // Now that we have a new checkpoint, we might be able to @@ -176,18 +172,6 @@ public class GetImageServlet extends HttpServlet { } return null; } - - // We may have lost our ticket since the last time we tried to open - // an http connection, so log in just in case. - private UserGroupInformation reloginIfNecessary() throws IOException { - // This method is only called on the NN, therefore it is safe to - // use these key values. - return UserGroupInformation.loginUserFromKeytabAndReturnUGI( - SecurityUtil.getServerPrincipal(conf - .get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY), - NameNode.getAddress(conf).getHostName()), - conf.get(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY)); - } }); } catch (Throwable t) { @@ -232,18 +216,10 @@ public class GetImageServlet extends HttpServlet { Set validRequestors = new HashSet(); - validRequestors.add( - SecurityUtil.getServerPrincipal(conf - .get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY), NameNode - .getAddress(conf).getHostName())); validRequestors.add( SecurityUtil.getServerPrincipal(conf .get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY), NameNode .getAddress(conf).getHostName())); - validRequestors.add( - SecurityUtil.getServerPrincipal(conf - .get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY), - SecondaryNameNode.getHttpAddress(conf).getHostName())); validRequestors.add( SecurityUtil.getServerPrincipal(conf .get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_USER_NAME_KEY), @@ -251,10 +227,6 @@ public class GetImageServlet extends HttpServlet { if (HAUtil.isHAEnabled(conf, DFSUtil.getNamenodeNameServiceId(conf))) { Configuration otherNnConf = HAUtil.getConfForOtherNode(conf); - validRequestors.add( - SecurityUtil.getServerPrincipal(otherNnConf - .get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY), - NameNode.getAddress(otherNnConf).getHostName())); validRequestors.add( SecurityUtil.getServerPrincipal(otherNnConf .get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY), @@ -263,11 +235,11 @@ public class GetImageServlet extends HttpServlet { for(String v : validRequestors) { if(v != null && v.equals(remoteUser)) { - if(LOG.isDebugEnabled()) LOG.debug("isValidRequestor is allowing: " + remoteUser); + if(LOG.isInfoEnabled()) LOG.info("GetImageServlet allowing: " + remoteUser); return true; } } - if(LOG.isDebugEnabled()) LOG.debug("isValidRequestor is rejecting: " + remoteUser); + if(LOG.isInfoEnabled()) LOG.info("GetImageServlet rejecting: " + remoteUser); return false; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 05bc8c2ec67..56ba8a2082b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -164,10 +164,8 @@ public class NameNode { 
DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_HTTP_ADDRESS_KEY, - DFS_NAMENODE_HTTPS_ADDRESS_KEY, DFS_NAMENODE_KEYTAB_FILE_KEY, DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, - DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY, DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY, DFS_NAMENODE_BACKUP_ADDRESS_KEY, DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, @@ -361,8 +359,9 @@ public class NameNode { } protected void setHttpServerAddress(Configuration conf) { - conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, - NetUtils.getHostPortString(getHttpAddress())); + String hostPort = NetUtils.getHostPortString(getHttpAddress()); + conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, hostPort); + LOG.info("Web-server up at: " + hostPort); } protected void loadNamesystem(Configuration conf) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java index c40a5b29728..2e62b8a1093 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT; @@ -43,6 +44,7 @@ import org.apache.hadoop.http.HttpServer; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.server.AuthenticationFilter; import org.apache.hadoop.security.authorize.AccessControlList; /** @@ -78,127 +80,101 @@ public class NameNodeHttpServer { conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY), nn.getNameNodeAddress().getHostName()); } - + public void start() throws IOException { final String infoHost = bindAddress.getHostName(); - - if(UserGroupInformation.isSecurityEnabled()) { - String httpsUser = SecurityUtil.getServerPrincipal(conf - .get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY), infoHost); - if (httpsUser == null) { - LOG.warn(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY - + " not defined in config. Starting http server as " - + getDefaultServerPrincipal() - + ": Kerberized SSL may be not function correctly."); - } else { - // Kerberized SSL servers must be run from the host principal... 
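
In place of the host-principal re-login being deleted here, the rewritten server setup just below only needs two configuration values: the internal SPNEGO principal and the NameNode keytab. A sketch in Java with placeholder values (the property names are the ones this patch introduces and lists in the hdfs-default.xml hunk later in the diff; the principal and keytab path are illustrative):

    // Sketch only: what an operator effectively provides for the new filter.
    static Configuration spnegoNameNodeConf() {
      Configuration conf = new HdfsConfiguration();
      conf.set("dfs.namenode.kerberos.internal.spnego.principal",
          "HTTP/_HOST@EXAMPLE.COM");              // placeholder principal
      conf.set("dfs.namenode.keytab.file",
          "/etc/hadoop/conf/nn.keytab");          // placeholder keytab path
      return conf;
    }

With those two values present, the new code below turns them into the kerberos.principal and kerberos.keytab parameters of an AuthenticationFilter registered as SPNEGO_FILTER; no separate HTTPS principal or re-login is involved.
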
- LOG.info("Logging in as " + httpsUser + " to start http server."); - SecurityUtil.login(conf, DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, - DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY, infoHost); - } - } + int infoPort = bindAddress.getPort(); - UserGroupInformation ugi = UserGroupInformation.getLoginUser(); - try { - this.httpServer = ugi.doAs(new PrivilegedExceptionAction() { - @Override - public HttpServer run() throws IOException, InterruptedException { - int infoPort = bindAddress.getPort(); - httpServer = new HttpServer("hdfs", infoHost, infoPort, - infoPort == 0, conf, - new AccessControlList(conf.get(DFSConfigKeys.DFS_ADMIN, " "))) { - { - if (WebHdfsFileSystem.isEnabled(conf, LOG)) { - //add SPNEGO authentication filter for webhdfs - final String name = "SPNEGO"; - final String classname = AuthFilter.class.getName(); - final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*"; - Map params = getAuthFilterParams(conf); - defineFilter(webAppContext, name, classname, params, - new String[]{pathSpec}); - LOG.info("Added filter '" + name + "' (class=" + classname + ")"); - - // add webhdfs packages - addJerseyResourcePackage( - NamenodeWebHdfsMethods.class.getPackage().getName() - + ";" + Param.class.getPackage().getName(), pathSpec); - } + httpServer = new HttpServer("hdfs", infoHost, infoPort, + infoPort == 0, conf, + new AccessControlList(conf.get(DFS_ADMIN, " "))) { + { + // Add SPNEGO support to NameNode + if (UserGroupInformation.isSecurityEnabled()) { + Map params = new HashMap(); + String principalInConf = conf.get( + DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY); + if (principalInConf != null && !principalInConf.isEmpty()) { + params.put("kerberos.principal", + SecurityUtil.getServerPrincipal(principalInConf, infoHost)); + String httpKeytab = conf.get(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY); + if (httpKeytab != null && !httpKeytab.isEmpty()) { + params.put("kerberos.keytab", httpKeytab); } - private Map getAuthFilterParams(Configuration conf) - throws IOException { - Map params = new HashMap(); - String principalInConf = conf - .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY); - if (principalInConf != null && !principalInConf.isEmpty()) { - params - .put( - DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, - SecurityUtil.getServerPrincipal(principalInConf, - infoHost)); - } - String httpKeytab = conf - .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY); - if (httpKeytab != null && !httpKeytab.isEmpty()) { - params.put( - DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY, - httpKeytab); - } - return params; - } - }; + params.put(AuthenticationFilter.AUTH_TYPE, "kerberos"); - boolean certSSL = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false); - boolean useKrb = UserGroupInformation.isSecurityEnabled(); - if (certSSL || useKrb) { - boolean needClientAuth = conf.getBoolean( - DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY, - DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT); - InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf - .get(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, - DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT)); - Configuration sslConf = new HdfsConfiguration(false); - if (certSSL) { - sslConf.addResource(conf.get(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY, - DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT)); - } - httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth, - useKrb); - // assume same ssl port for all datanodes - InetSocketAddress datanodeSslPort = 
NetUtils.createSocketAddr( - conf.get(DFS_DATANODE_HTTPS_ADDRESS_KEY, - infoHost + ":" + DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT)); - httpServer.setAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY, - datanodeSslPort.getPort()); + defineFilter(webAppContext, SPNEGO_FILTER, + AuthenticationFilter.class.getName(), params, null); } - httpServer.setAttribute(NAMENODE_ATTRIBUTE_KEY, nn); - httpServer.setAttribute(NAMENODE_ADDRESS_ATTRIBUTE_KEY, - nn.getNameNodeAddress()); - httpServer.setAttribute(FSIMAGE_ATTRIBUTE_KEY, nn.getFSImage()); - httpServer.setAttribute(JspHelper.CURRENT_CONF, conf); - setupServlets(httpServer, conf); - httpServer.start(); - - // The web-server port can be ephemeral... ensure we have the correct - // info - infoPort = httpServer.getPort(); - httpAddress = new InetSocketAddress(infoHost, infoPort); - LOG.info(nn.getRole() + " Web-server up at: " + httpAddress); - return httpServer; } - }); - } catch (InterruptedException e) { - throw new IOException(e); - } finally { - if(UserGroupInformation.isSecurityEnabled() && - conf.get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY) != null) { - // Go back to being the correct Namenode principal - LOG.info("Logging back in as NameNode user following http server start"); - nn.loginAsNameNodeUser(conf); + if (WebHdfsFileSystem.isEnabled(conf, LOG)) { + //add SPNEGO authentication filter for webhdfs + final String name = "SPNEGO"; + final String classname = AuthFilter.class.getName(); + final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*"; + Map params = getAuthFilterParams(conf); + defineFilter(webAppContext, name, classname, params, + new String[]{pathSpec}); + LOG.info("Added filter '" + name + "' (class=" + classname + ")"); + + // add webhdfs packages + addJerseyResourcePackage( + NamenodeWebHdfsMethods.class.getPackage().getName() + + ";" + Param.class.getPackage().getName(), pathSpec); + } } + + private Map getAuthFilterParams(Configuration conf) + throws IOException { + Map params = new HashMap(); + String principalInConf = conf + .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY); + if (principalInConf != null && !principalInConf.isEmpty()) { + params + .put( + DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, + SecurityUtil.getServerPrincipal(principalInConf, + bindAddress.getHostName())); + } + String httpKeytab = conf + .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY); + if (httpKeytab != null && !httpKeytab.isEmpty()) { + params.put( + DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY, + httpKeytab); + } + return params; + } + }; + + boolean certSSL = conf.getBoolean("dfs.https.enable", false); + if (certSSL) { + boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false); + InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(infoHost + ":" + conf.get( + "dfs.https.port", infoHost + ":" + 0)); + Configuration sslConf = new Configuration(false); + if (certSSL) { + sslConf.addResource(conf.get("dfs.https.server.keystore.resource", + "ssl-server.xml")); + } + httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth); + // assume same ssl port for all datanodes + InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.get( + "dfs.datanode.https.address", infoHost + ":" + 50475)); + httpServer.setAttribute("datanode.https.port", datanodeSslPort + .getPort()); } + httpServer.setAttribute("name.node", nn); + httpServer.setAttribute("name.node.address", bindAddress); + httpServer.setAttribute("name.system.image", 
nn.getFSImage()); + httpServer.setAttribute(JspHelper.CURRENT_CONF, conf); + setupServlets(httpServer, conf); + httpServer.start(); + httpAddress = new InetSocketAddress(bindAddress.getAddress(), httpServer.getPort()); } - + + public void stop() throws Exception { if (httpServer != null) { httpServer.stop(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java index 2ddb9f85a6e..7c02c644da8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java @@ -25,8 +25,10 @@ import java.security.PrivilegedAction; import java.security.PrivilegedExceptionAction; import java.util.Collection; import java.util.Date; +import java.util.HashMap; import java.util.Iterator; import java.util.List; +import java.util.Map; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLineParser; @@ -44,6 +46,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import static org.apache.hadoop.hdfs.DFSConfigKeys.*; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.NameNodeProxies; @@ -63,9 +66,9 @@ import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.source.JvmMetrics; import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.server.AuthenticationFilter; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.util.Daemon; @@ -108,7 +111,6 @@ public class SecondaryNameNode implements Runnable { private volatile boolean shouldRun; private HttpServer infoServer; private int infoPort; - private int imagePort; private String infoBindAddress; private Collection checkpointDirs; @@ -229,63 +231,47 @@ public class SecondaryNameNode implements Runnable { // Initialize other scheduling parameters from the configuration checkpointConf = new CheckpointConf(conf); - - // initialize the webserver for uploading files. - // Kerberized SSL servers must be run from the host principal... 
- UserGroupInformation httpUGI = - UserGroupInformation.loginUserFromKeytabAndReturnUGI( - SecurityUtil.getServerPrincipal(conf - .get(DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY), - infoBindAddress), - conf.get(DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY)); - try { - infoServer = httpUGI.doAs(new PrivilegedExceptionAction() { - @Override - public HttpServer run() throws IOException, InterruptedException { - LOG.info("Starting web server as: " + - UserGroupInformation.getCurrentUser().getUserName()); - int tmpInfoPort = infoSocAddr.getPort(); - infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort, - tmpInfoPort == 0, conf, - new AccessControlList(conf.get(DFS_ADMIN, " "))); - - if(UserGroupInformation.isSecurityEnabled()) { - SecurityUtil.initKrb5CipherSuites(); - InetSocketAddress secInfoSocAddr = - NetUtils.createSocketAddr(infoBindAddress + ":"+ conf.getInt( - DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY, - DFS_NAMENODE_SECONDARY_HTTPS_PORT_DEFAULT)); - imagePort = secInfoSocAddr.getPort(); - infoServer.addSslListener(secInfoSocAddr, conf, false, true); + // initialize the webserver for uploading files. + int tmpInfoPort = infoSocAddr.getPort(); + infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort, + tmpInfoPort == 0, conf, + new AccessControlList(conf.get(DFS_ADMIN, " "))) { + { + if (UserGroupInformation.isSecurityEnabled()) { + Map params = new HashMap(); + String principalInConf = conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY); + if (principalInConf != null && !principalInConf.isEmpty()) { + params.put("kerberos.principal", + SecurityUtil.getServerPrincipal(principalInConf, infoSocAddr.getHostName())); } - - infoServer.setAttribute("secondary.name.node", SecondaryNameNode.this); - infoServer.setAttribute("name.system.image", checkpointImage); - infoServer.setAttribute(JspHelper.CURRENT_CONF, conf); - infoServer.addInternalServlet("getimage", "/getimage", - GetImageServlet.class, true); - infoServer.start(); - return infoServer; + String httpKeytab = conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY); + if (httpKeytab != null && !httpKeytab.isEmpty()) { + params.put("kerberos.keytab", httpKeytab); + } + params.put(AuthenticationFilter.AUTH_TYPE, "kerberos"); + + defineFilter(webAppContext, SPNEGO_FILTER, AuthenticationFilter.class.getName(), + params, null); } - }); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - + } + }; + infoServer.setAttribute("secondary.name.node", this); + infoServer.setAttribute("name.system.image", checkpointImage); + infoServer.setAttribute(JspHelper.CURRENT_CONF, conf); + infoServer.addInternalServlet("getimage", "/getimage", + GetImageServlet.class, true); + infoServer.start(); + LOG.info("Web server init done"); // The web-server port can be ephemeral... 
ensure we have the correct info infoPort = infoServer.getPort(); - if (!UserGroupInformation.isSecurityEnabled()) { - imagePort = infoPort; - } - - conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" +infoPort); - LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" +infoPort); - LOG.info("Secondary image servlet up at: " + infoBindAddress + ":" + imagePort); + + conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" + infoPort); + LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" + infoPort); LOG.info("Checkpoint Period :" + checkpointConf.getPeriod() + " secs " + - "(" + checkpointConf.getPeriod()/60 + " min)"); + "(" + checkpointConf.getPeriod() / 60 + " min)"); LOG.info("Log Size Trigger :" + checkpointConf.getTxnCount() + " txns"); } @@ -434,7 +420,7 @@ public class SecondaryNameNode implements Runnable { throw new IOException("This is not a DFS"); } - String configuredAddress = DFSUtil.getInfoServer(null, conf, true); + String configuredAddress = DFSUtil.getInfoServer(null, conf, false); String address = DFSUtil.substituteForWildcardAddress(configuredAddress, fsName.getHost()); LOG.debug("Will connect to NameNode at HTTP address: " + address); @@ -446,7 +432,7 @@ public class SecondaryNameNode implements Runnable { * for image transfers */ private InetSocketAddress getImageListenAddress() { - return new InetSocketAddress(infoBindAddress, imagePort); + return new InetSocketAddress(infoBindAddress, infoPort); } /** @@ -507,7 +493,7 @@ public class SecondaryNameNode implements Runnable { /** - * @param argv The parameters passed to this program. + * @param opts The parameters passed to this program. * @exception Exception if the filesystem does not exist. * @return 0 on success, non zero on error. */ @@ -709,7 +695,7 @@ public class SecondaryNameNode implements Runnable { * Construct a checkpoint image. * @param conf Node configuration. * @param imageDirs URIs of storage for image. - * @param editDirs URIs of storage for edit logs. + * @param editsDirs URIs of storage for edit logs. * @throws IOException If storage cannot be access. */ CheckpointStorage(Configuration conf, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java index b99720aff5a..97088c5f433 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java @@ -201,19 +201,17 @@ public class TransferFsImage { String queryString, List localPaths, NNStorage dstStorage, boolean getChecksum) throws IOException { byte[] buf = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE]; - String proto = UserGroupInformation.isSecurityEnabled() ? "https://" : "http://"; - StringBuilder str = new StringBuilder(proto+nnHostPort+"/getimage?"); - str.append(queryString); + String str = "http://" + nnHostPort + "/getimage?" 
+ queryString; + LOG.info("Opening connection to " + str); // // open connection to remote server // - URL url = new URL(str.toString()); - - // Avoid Krb bug with cross-realm hosts - SecurityUtil.fetchServiceTicket(url); - HttpURLConnection connection = (HttpURLConnection) url.openConnection(); - + URL url = new URL(str); + + HttpURLConnection connection = (HttpURLConnection) + SecurityUtil.openSecureHttpConnection(url); + if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) { throw new HttpGetFailedException( "Image transfer servlet at " + url + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java index 1777ca6f6a2..773038f62b6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java @@ -95,7 +95,6 @@ public class BootstrapStandby implements Tool, Configurable { static final int ERR_CODE_LOGS_UNAVAILABLE = 6; public int run(String[] args) throws Exception { - SecurityUtil.initKrb5CipherSuites(); parseArgs(args); parseConfAndFindOtherNN(); NameNode.checkAllowFormat(conf); @@ -322,7 +321,7 @@ public class BootstrapStandby implements Tool, Configurable { "Could not determine valid IPC address for other NameNode (%s)" + ", got: %s", otherNNId, otherIpcAddr); - otherHttpAddr = DFSUtil.getInfoServer(null, otherNode, true); + otherHttpAddr = DFSUtil.getInfoServer(null, otherNode, false); otherHttpAddr = DFSUtil.substituteForWildcardAddress(otherHttpAddr, otherIpcAddr.getHostName()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java index 036dd431ade..bbec10c3084 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java @@ -92,7 +92,7 @@ public class StandbyCheckpointer { } private String getHttpAddress(Configuration conf) { - String configuredAddr = DFSUtil.getInfoServer(null, conf, true); + String configuredAddr = DFSUtil.getInfoServer(null, conf, false); // Use the hostname from the RPC address as a default, in case // the HTTP address is configured to 0.0.0.0. 
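
The thread running through the checkpointer change above and the DFSAdmin and DFSck changes below is the same: every image-transfer and fsck client now asks DFSUtil.getInfoServer for the NameNode's plain HTTP address (the trailing false selects http rather than https) and relies on SPNEGO for authentication. A rough sketch of the resulting -fetchImage style flow, assuming fs.defaultFS points at an HDFS cluster, with an illustrative output directory and imports elided:

    // Sketch only: mirrors the calls DFSAdmin makes in this patch.
    static void backupLatestImage(Configuration conf, File downloadDir)
        throws IOException {
      FileSystem fs = FileSystem.get(conf);
      String infoServer = DFSUtil.getInfoServer(
          HAUtil.getAddressOfActive(fs), conf, false);   // plain HTTP info address
      TransferFsImage.downloadMostRecentImageToDirectory(infoServer, downloadDir);
    }

Under the hood TransferFsImage builds an http://<infoServer>/getimage URL and opens it with SecurityUtil.openSecureHttpConnection, as shown in the hunk above.
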
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java index 9c49654c471..5b572362411 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java @@ -504,7 +504,7 @@ public class DFSAdmin extends FsShell { */ public int fetchImage(String[] argv, int idx) throws IOException { String infoServer = DFSUtil.getInfoServer( - HAUtil.getAddressOfActive(getDFS()), getConf(), true); + HAUtil.getAddressOfActive(getDFS()), getConf(), false); TransferFsImage.downloadMostRecentImageToDirectory(infoServer, new File(argv[idx])); return 0; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java index 7d78bced360..34c72e9700d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java @@ -153,8 +153,7 @@ public class DFSck extends Configured implements Tool { url.append("&startblockafter=").append(String.valueOf(cookie)); } URL path = new URL(url.toString()); - SecurityUtil.fetchServiceTicket(path); - URLConnection connection = path.openConnection(); + URLConnection connection = SecurityUtil.openSecureHttpConnection(path); InputStream stream = connection.getInputStream(); BufferedReader input = new BufferedReader(new InputStreamReader( stream, "UTF-8")); @@ -222,16 +221,11 @@ public class DFSck extends Configured implements Tool { return null; } - return DFSUtil.getInfoServer(HAUtil.getAddressOfActive(fs), conf, true); + return DFSUtil.getInfoServer(HAUtil.getAddressOfActive(fs), conf, false); } private int doWork(final String[] args) throws IOException { - String proto = "http://"; - if (UserGroupInformation.isSecurityEnabled()) { - SecurityUtil.initKrb5CipherSuites(); - proto = "https://"; - } - final StringBuilder url = new StringBuilder(proto); + final StringBuilder url = new StringBuilder("http://"); String namenodeAddress = getCurrentNamenodeAddress(); if (namenodeAddress == null) { @@ -279,8 +273,7 @@ public class DFSck extends Configured implements Tool { return listCorruptFileBlocks(dir, url.toString()); } URL path = new URL(url.toString()); - SecurityUtil.fetchServiceTicket(path); - URLConnection connection = path.openConnection(); + URLConnection connection = SecurityUtil.openSecureHttpConnection(path); InputStream stream = connection.getInputStream(); BufferedReader input = new BufferedReader(new InputStreamReader( stream, "UTF-8")); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java index 3e652c13a7c..63aa6b2064f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java @@ -72,11 +72,6 @@ public class DelegationTokenFetcher { private static final String RENEW = "renew"; private static final String PRINT = "print"; - static { - // Enable Kerberos sockets - System.setProperty("https.cipherSuites", "TLS_KRB5_WITH_3DES_EDE_CBC_SHA"); - } - 
private static void printUsage(PrintStream err) throws IOException { err.println("fetchdt retrieves delegation tokens from the NameNode"); err.println(); @@ -106,7 +101,7 @@ public class DelegationTokenFetcher { final Configuration conf = new HdfsConfiguration(); Options fetcherOptions = new Options(); fetcherOptions.addOption(WEBSERVICE, true, - "HTTPS url to reach the NameNode at"); + "HTTP url to reach the NameNode at"); fetcherOptions.addOption(RENEWER, true, "Name of the delegation token renewer"); fetcherOptions.addOption(CANCEL, false, "cancel the token"); @@ -224,8 +219,7 @@ public class DelegationTokenFetcher { } URL remoteURL = new URL(url.toString()); - SecurityUtil.fetchServiceTicket(remoteURL); - URLConnection connection = URLUtils.openConnection(remoteURL); + URLConnection connection = SecurityUtil.openSecureHttpConnection(remoteURL); InputStream in = connection.getInputStream(); Credentials ts = new Credentials(); dis = new DataInputStream(in); @@ -264,7 +258,7 @@ public class DelegationTokenFetcher { try { URL url = new URL(buf.toString()); - SecurityUtil.fetchServiceTicket(url); + connection = (HttpURLConnection) SecurityUtil.openSecureHttpConnection(url); connection = (HttpURLConnection)URLUtils.openConnection(url); if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) { throw new IOException("Error renewing token: " + @@ -358,8 +352,7 @@ public class DelegationTokenFetcher { HttpURLConnection connection=null; try { URL url = new URL(buf.toString()); - SecurityUtil.fetchServiceTicket(url); - connection = (HttpURLConnection)URLUtils.openConnection(url); + connection = (HttpURLConnection) SecurityUtil.openSecureHttpConnection(url); if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) { throw new IOException("Error cancelling token: " + connection.getResponseMessage()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index b084d4b4c8b..54ce2a26a34 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -858,4 +858,15 @@ + + dfs.namenode.kerberos.internal.spnego.principal + ${dfs.web.authentication.kerberos.principal} + + + + dfs.secondary.namenode.kerberos.internal.spnego.principal + ${dfs.web.authentication.kerberos.principal} + + + From a2eb525ce1e7228f8585e6c462e37588806f9834 Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Fri, 4 May 2012 22:01:57 +0000 Subject: [PATCH 19/70] Move HDFS-2617 commit message to the right place. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1334223 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 305d33cb5f0..73c95e14851 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -422,6 +422,9 @@ Release 2.0.0 - UNRELEASED HDFS-3303. Remove Writable implementation from RemoteEditLogManifest. (Brandon Li via szetszwo) + HDFS-2617. Replaced Kerberized SSL for image transfer and fsck + with SPNEGO-based solution. (jghoman, tucu, and atm via eli) + OPTIMIZATIONS HDFS-3024. Improve performance of stringification in addStoredBlock (todd) @@ -435,9 +438,6 @@ Release 2.0.0 - UNRELEASED HDFS-2476. 
More CPU efficient data structure for under-replicated, over-replicated, and invalidated blocks. (Tomasz Nykiel via todd) - HDFS-2617. Replaced Kerberized SSL for image transfer and fsck - with SPNEGO-based solution. (jghoman, tucu, and atm via eli) - BUG FIXES HDFS-2481. Unknown protocol: org.apache.hadoop.hdfs.protocol.ClientProtocol. From 51e520c68aafb73b784bf690a8a42de3af0f229c Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Fri, 4 May 2012 22:14:10 +0000 Subject: [PATCH 20/70] HADOOP-8349. ViewFS doesn't work when the root of a file system is mounted. Contributed by Aaron T. Myers. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1334231 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 2 + .../apache/hadoop/fs/AbstractFileSystem.java | 2 +- .../main/java/org/apache/hadoop/fs/Path.java | 7 ++ .../hadoop/fs/viewfs/ChRootedFileSystem.java | 5 +- .../apache/hadoop/fs/viewfs/ChRootedFs.java | 6 +- .../fs/viewfs/ViewFileSystemBaseTest.java | 30 ++++-- .../hadoop/fs/viewfs/ViewFsBaseTest.java | 39 ++++++-- .../hdfs/server/namenode/FSNamesystem.java | 6 +- .../viewfs/TestViewFileSystemAtHdfsRoot.java | 93 +++++++++++++++++++ .../fs/viewfs/TestViewFileSystemHdfs.java | 6 +- .../fs/viewfs/TestViewFsAtHdfsRoot.java | 93 +++++++++++++++++++ .../hadoop/fs/viewfs/TestViewFsHdfs.java | 28 +----- 12 files changed, 266 insertions(+), 51 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 8bb39d508b8..558c9b89625 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -423,6 +423,8 @@ Release 2.0.0 - UNRELEASED HADOOP-8355. SPNEGO filter throws/logs exception when authentication fails (tucu) + HADOOP-8349. ViewFS doesn't work when the root of a file system is mounted. (atm) + BREAKDOWN OF HADOOP-7454 SUBTASKS HADOOP-7455. HA: Introduce HA Service Protocol Interface. 
(suresh) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java index 86974734b59..cbcce217b61 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java @@ -346,7 +346,7 @@ public abstract class AbstractFileSystem { path); } else { throw new InvalidPathException( - "Path without scheme with non-null autorhrity:" + path); + "Path without scheme with non-null authority:" + path); } } String thisScheme = this.getUri().getScheme(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java index 2fbed2a2bb2..3d193dfad28 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java @@ -223,6 +223,13 @@ public class Path implements Comparable { return isUriPathAbsolute(); } + /** + * @return true if and only if this path represents the root of a file system + */ + public boolean isRoot() { + return getParent() == null; + } + /** Returns the final component of this path.*/ public String getName() { String path = uri.getPath(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java index 209fd216d14..85426fa4fff 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java @@ -75,7 +75,8 @@ class ChRootedFileSystem extends FilterFileSystem { protected Path fullPath(final Path path) { super.checkPath(path); return path.isAbsolute() ? - new Path(chRootPathPartString + path.toUri().getPath()) : + new Path((chRootPathPart.isRoot() ? "" : chRootPathPartString) + + path.toUri().getPath()) : new Path(chRootPathPartString + workingDir.toUri().getPath(), path); } @@ -127,7 +128,7 @@ class ChRootedFileSystem extends FilterFileSystem { } String pathPart = p.toUri().getPath(); return (pathPart.length() == chRootPathPartString.length()) ? "" : pathPart - .substring(chRootPathPartString.length() + 1); + .substring(chRootPathPartString.length() + (chRootPathPart.isRoot() ? 0 : 1)); } @Override diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java index 063d0d04fa9..f6e27d28151 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java @@ -79,7 +79,8 @@ class ChRootedFs extends AbstractFileSystem { */ protected Path fullPath(final Path path) { super.checkPath(path); - return new Path(chRootPathPartString + path.toUri().getPath()); + return new Path((chRootPathPart.isRoot() ? 
"" : chRootPathPartString) + + path.toUri().getPath()); } public ChRootedFs(final AbstractFileSystem fs, final Path theRoot) @@ -127,7 +128,8 @@ class ChRootedFs extends AbstractFileSystem { } String pathPart = p.toUri().getPath(); return (pathPart.length() == chRootPathPartString.length()) ? - "" : pathPart.substring(chRootPathPartString.length() + 1); + "" : pathPart.substring(chRootPathPartString.length() + + (chRootPathPart.isRoot() ? 0 : 1)); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java index 1de434e3a96..d4740a41fc9 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java @@ -71,11 +71,8 @@ public class ViewFileSystemBaseTest { @Before public void setUp() throws Exception { - targetTestRoot = FileSystemTestHelper.getAbsoluteTestRootPath(fsTarget); - // In case previous test was killed before cleanup - fsTarget.delete(targetTestRoot, true); + initializeTargetTestRoot(); - fsTarget.mkdirs(targetTestRoot); // Make user and data dirs - we creates links to them in the mount table fsTarget.mkdirs(new Path(targetTestRoot,"user")); fsTarget.mkdirs(new Path(targetTestRoot,"data")); @@ -99,7 +96,16 @@ public class ViewFileSystemBaseTest { fsTarget.delete(FileSystemTestHelper.getTestRootPath(fsTarget), true); } + void initializeTargetTestRoot() throws IOException { + targetTestRoot = FileSystemTestHelper.getAbsoluteTestRootPath(fsTarget); + // In case previous test was killed before cleanup + fsTarget.delete(targetTestRoot, true); + + fsTarget.mkdirs(targetTestRoot); + } + void setupMountPoints() { + ConfigUtil.addLink(conf, "/targetRoot", targetTestRoot.toUri()); ConfigUtil.addLink(conf, "/user", new Path(targetTestRoot,"user").toUri()); ConfigUtil.addLink(conf, "/user2", new Path(targetTestRoot,"user").toUri()); ConfigUtil.addLink(conf, "/data", new Path(targetTestRoot,"data").toUri()); @@ -121,7 +127,7 @@ public class ViewFileSystemBaseTest { } int getExpectedMountPoints() { - return 7; + return 8; } /** @@ -166,7 +172,7 @@ public class ViewFileSystemBaseTest { } } } - Assert.assertEquals(expectedTokenCount / 2, delTokens.size()); + Assert.assertEquals((expectedTokenCount + 1) / 2, delTokens.size()); } int getExpectedDelegationTokenCountWithCredentials() { @@ -309,6 +315,16 @@ public class ViewFileSystemBaseTest { Assert.assertTrue("Renamed dest should exist as dir in target", fsTarget.isDirectory(new Path(targetTestRoot,"user/dirFooBar"))); + // Make a directory under a directory that's mounted from the root of another FS + fsView.mkdirs(new Path("/targetRoot/dirFoo")); + Assert.assertTrue(fsView.exists(new Path("/targetRoot/dirFoo"))); + boolean dirFooPresent = false; + for (FileStatus fileStatus : fsView.listStatus(new Path("/targetRoot/"))) { + if (fileStatus.getPath().getName().equals("dirFoo")) { + dirFooPresent = true; + } + } + Assert.assertTrue(dirFooPresent); } // rename across mount points that point to same target also fail @@ -418,7 +434,7 @@ public class ViewFileSystemBaseTest { } int getExpectedDirPaths() { - return 6; + return 7; } @Test diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java index 8622f02ff6b..7f731de23e9 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java @@ -33,6 +33,7 @@ import org.apache.hadoop.fs.AbstractFileSystem; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileContextTestHelper; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.FileContextTestHelper.fileType; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FsConstants; @@ -77,12 +78,8 @@ public class ViewFsBaseTest { @Before public void setUp() throws Exception { - - targetTestRoot = FileContextTestHelper.getAbsoluteTestRootPath(fcTarget); - // In case previous test was killed before cleanup - fcTarget.delete(targetTestRoot, true); + initializeTargetTestRoot(); - fcTarget.mkdir(targetTestRoot, FileContext.DEFAULT_PERM, true); // Make user and data dirs - we creates links to them in the mount table fcTarget.mkdir(new Path(targetTestRoot,"user"), FileContext.DEFAULT_PERM, true); @@ -100,6 +97,7 @@ public class ViewFsBaseTest { // Set up the defaultMT in the config with our mount point links conf = new Configuration(); + ConfigUtil.addLink(conf, "/targetRoot", targetTestRoot.toUri()); ConfigUtil.addLink(conf, "/user", new Path(targetTestRoot,"user").toUri()); ConfigUtil.addLink(conf, "/user2", @@ -118,6 +116,14 @@ public class ViewFsBaseTest { fcView = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf); // Also try viewfs://default/ - note authority is name of mount table } + + void initializeTargetTestRoot() throws IOException { + targetTestRoot = FileContextTestHelper.getAbsoluteTestRootPath(fcTarget); + // In case previous test was killed before cleanup + fcTarget.delete(targetTestRoot, true); + + fcTarget.mkdir(targetTestRoot, FileContext.DEFAULT_PERM, true); + } @After public void tearDown() throws Exception { @@ -128,7 +134,11 @@ public class ViewFsBaseTest { public void testGetMountPoints() { ViewFs viewfs = (ViewFs) fcView.getDefaultFileSystem(); MountPoint[] mountPoints = viewfs.getMountPoints(); - Assert.assertEquals(7, mountPoints.length); + Assert.assertEquals(8, mountPoints.length); + } + + int getExpectedDelegationTokenCount() { + return 0; } /** @@ -140,7 +150,7 @@ public class ViewFsBaseTest { public void testGetDelegationTokens() throws IOException { List> delTokens = fcView.getDelegationTokens(new Path("/"), "sanjay"); - Assert.assertEquals(0, delTokens.size()); + Assert.assertEquals(getExpectedDelegationTokenCount(), delTokens.size()); } @@ -281,6 +291,19 @@ public class ViewFsBaseTest { Assert.assertTrue("Renamed dest should exist as dir in target", isDir(fcTarget,new Path(targetTestRoot,"user/dirFooBar"))); + // Make a directory under a directory that's mounted from the root of another FS + fcView.mkdir(new Path("/targetRoot/dirFoo"), FileContext.DEFAULT_PERM, false); + Assert.assertTrue(exists(fcView, new Path("/targetRoot/dirFoo"))); + boolean dirFooPresent = false; + RemoteIterator dirContents = fcView.listStatus(new Path( + "/targetRoot/")); + while (dirContents.hasNext()) { + FileStatus fileStatus = dirContents.next(); + if (fileStatus.getPath().getName().equals("dirFoo")) { + dirFooPresent = true; + } + } + Assert.assertTrue(dirFooPresent); } // rename across mount points that point to same target also 
fail @@ -358,7 +381,7 @@ public class ViewFsBaseTest { FileStatus[] dirPaths = fcView.util().listStatus(new Path("/")); FileStatus fs; - Assert.assertEquals(6, dirPaths.length); + Assert.assertEquals(7, dirPaths.length); fs = FileContextTestHelper.containsPath(fcView, "/user", dirPaths); Assert.assertNotNull(fs); Assert.assertTrue("A mount should appear as symlink", fs.isSymlink()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index b1686d0eb44..35800c16631 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -4556,7 +4556,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, if (destinationExisted && dinfo.isDir()) { Path spath = new Path(src); Path parent = spath.getParent(); - if (isRoot(parent)) { + if (parent.isRoot()) { overwrite = parent.toString(); } else { overwrite = parent.toString() + Path.SEPARATOR; @@ -4569,10 +4569,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats, leaseManager.changeLease(src, dst, overwrite, replaceBy); } - - private boolean isRoot(Path path) { - return path.getParent() == null; - } /** * Serializes leases. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java new file mode 100644 index 00000000000..9cc74e32705 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java @@ -0,0 +1,93 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.viewfs; + +import java.io.IOException; +import java.net.URISyntaxException; + +import javax.security.auth.login.LoginException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +/** + * Make sure that ViewFileSystem works when the root of an FS is mounted to a + * ViewFileSystem mount point. 
+ */ +public class TestViewFileSystemAtHdfsRoot extends ViewFileSystemBaseTest { + + private static MiniDFSCluster cluster; + private static Configuration CONF = new Configuration(); + private static FileSystem fHdfs; + + @BeforeClass + public static void clusterSetupAtBegining() throws IOException, + LoginException, URISyntaxException { + SupportsBlocks = true; + CONF.setBoolean( + DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true); + + cluster = new MiniDFSCluster.Builder(CONF) + .numDataNodes(2) + .build(); + cluster.waitClusterUp(); + + fHdfs = cluster.getFileSystem(); + } + + @AfterClass + public static void clusterShutdownAtEnd() throws Exception { + cluster.shutdown(); + } + + @Before + public void setUp() throws Exception { + fsTarget = fHdfs; + super.setUp(); + } + + /** + * Override this so that we don't set the targetTestRoot to any path under the + * root of the FS, and so that we don't try to delete the test dir, but rather + * only its contents. + */ + @Override + void initializeTargetTestRoot() throws IOException { + targetTestRoot = fHdfs.makeQualified(new Path("/")); + for (FileStatus status : fHdfs.listStatus(targetTestRoot)) { + fHdfs.delete(status.getPath(), true); + } + } + + @Override + int getExpectedDelegationTokenCount() { + return 8; + } + + @Override + int getExpectedDelegationTokenCountWithCredentials() { + return 1; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java index 7ad56c0e93c..9f71d85f051 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java @@ -105,17 +105,17 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest { // additional mount. @Override int getExpectedDirPaths() { - return 7; + return 8; } @Override int getExpectedMountPoints() { - return 8; + return 9; } @Override int getExpectedDelegationTokenCount() { - return 8; + return 9; } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java new file mode 100644 index 00000000000..449689242d4 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java @@ -0,0 +1,93 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs.viewfs; + +import java.io.IOException; +import java.net.URISyntaxException; + +import javax.security.auth.login.LoginException; + +import org.apache.hadoop.fs.FileContext; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +/** + * Make sure that ViewFs works when the root of an FS is mounted to a ViewFs + * mount point. + */ +public class TestViewFsAtHdfsRoot extends ViewFsBaseTest { + + private static MiniDFSCluster cluster; + private static HdfsConfiguration CONF = new HdfsConfiguration(); + private static FileContext fc; + + @BeforeClass + public static void clusterSetupAtBegining() throws IOException, + LoginException, URISyntaxException { + SupportsBlocks = true; + CONF.setBoolean( + DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true); + + cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build(); + cluster.waitClusterUp(); + fc = FileContext.getFileContext(cluster.getURI(0), CONF); + } + + + @AfterClass + public static void ClusterShutdownAtEnd() throws Exception { + cluster.shutdown(); + } + + @Before + public void setUp() throws Exception { + // create the test root on local_fs + fcTarget = fc; + super.setUp(); + } + + /** + * Override this so that we don't set the targetTestRoot to any path under the + * root of the FS, and so that we don't try to delete the test dir, but rather + * only its contents. + */ + @Override + void initializeTargetTestRoot() throws IOException { + targetTestRoot = fc.makeQualified(new Path("/")); + RemoteIterator dirContents = fc.listStatus(targetTestRoot); + while (dirContents.hasNext()) { + fc.delete(dirContents.next().getPath(), true); + } + } + + /** + * This overrides the default implementation since hdfs does have delegation + * tokens. 
+ */ + @Override + int getExpectedDelegationTokenCount() { + return 8; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java index 0e94b4eb3d2..dc7110cfafe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java @@ -20,7 +20,6 @@ package org.apache.hadoop.fs.viewfs; import java.io.IOException; import java.net.URISyntaxException; -import java.util.List; import javax.security.auth.login.LoginException; @@ -30,20 +29,13 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.Token; - -import org.junit.After; import org.junit.AfterClass; -import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; -import org.junit.Test; - public class TestViewFsHdfs extends ViewFsBaseTest { private static MiniDFSCluster cluster; - private static Path defaultWorkingDirectory; private static HdfsConfiguration CONF = new HdfsConfiguration(); private static FileContext fc; @@ -57,7 +49,7 @@ public class TestViewFsHdfs extends ViewFsBaseTest { cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build(); cluster.waitClusterUp(); fc = FileContext.getFileContext(cluster.getURI(0), CONF); - defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + + Path defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName())); fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true); } @@ -73,25 +65,15 @@ public class TestViewFsHdfs extends ViewFsBaseTest { // create the test root on local_fs fcTarget = fc; super.setUp(); - - } - - @After - public void tearDown() throws Exception { - super.tearDown(); } - - /* - * This overides the default implementation since hdfs does have delegation + /** + * This overrides the default implementation since hdfs does have delegation * tokens. */ @Override - @Test - public void testGetDelegationTokens() throws IOException { - List> delTokens = - fcView.getDelegationTokens(new Path("/"), "sanjay"); - Assert.assertEquals(7, delTokens.size()); + int getExpectedDelegationTokenCount() { + return 8; } } From e7d03636a098675f88bb3907c8ca6a64ea0ca6d8 Mon Sep 17 00:00:00 2001 From: Harsh J Date: Sun, 6 May 2012 11:20:16 +0000 Subject: [PATCH 21/70] HADOOP-8323. Revert HADOOP-7940, cause it may cause a performance regression. (harsh) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1334603 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 -- .../src/main/java/org/apache/hadoop/io/Text.java | 1 - .../src/test/java/org/apache/hadoop/io/TestText.java | 10 ---------- 3 files changed, 13 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 558c9b89625..a97d8787694 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -326,8 +326,6 @@ Release 2.0.0 - UNRELEASED HADOOP-8104. Inconsistent Jackson versions (tucu) - HADOOP-7940. The Text.clear() method does not clear the bytes as intended. (Csaba Miklos via harsh) - HADOOP-8119. 
Fix javac warnings in TestAuthenticationFilter in hadoop-auth. (szetszwo) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java index 5c52883ebf5..e38dd3c79a5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java @@ -239,7 +239,6 @@ public class Text extends BinaryComparable */ public void clear() { length = 0; - bytes = EMPTY_BYTES; } /* diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java index a756a57dae7..a7718bfba70 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java @@ -192,16 +192,6 @@ public class TestText extends TestCase { assertTrue(text.find("\u20ac", 5)==11); } - public void testClear() { - Text text = new Text(); - assertEquals("", text.toString()); - assertEquals(0, text.getBytes().length); - text = new Text("abcd\u20acbdcd\u20ac"); - text.clear(); - assertEquals("", text.toString()); - assertEquals(0, text.getBytes().length); - } - public void testFindAfterUpdatingContents() throws Exception { Text text = new Text("abcd"); text.set("a".getBytes()); From 8069771a2dd510cc31bea1d2d4a0d8406f50619f Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Mon, 7 May 2012 15:49:33 +0000 Subject: [PATCH 22/70] HADOOP-8327. distcpv2 and distcpv1 jars should not coexist (Dave Thompson via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1335075 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 + .../tools/{DistCp.java => DistCpV1.java} | 14 ++-- .../org/apache/hadoop/tools/Logalyzer.java | 2 +- .../apache/hadoop/tools/TestCopyFiles.java | 72 +++++++++---------- 4 files changed, 47 insertions(+), 44 deletions(-) rename hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/{DistCp.java => DistCpV1.java} (99%) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index a97d8787694..e2c2b1a80a3 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -536,6 +536,9 @@ Release 0.23.3 - UNRELEASED HADOOP-8335. Improve Configuration's address handling (Daryn Sharp via bobby) + HADOOP-8327. distcpv2 and distcpv1 jars should not coexist (Dave Thompson + via bobby) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCp.java b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCpV1.java similarity index 99% rename from hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCp.java rename to hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCpV1.java index 5c9203804ac..4472443d334 100644 --- a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCp.java +++ b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCpV1.java @@ -78,8 +78,8 @@ import org.apache.hadoop.util.ToolRunner; * A Map-reduce program to recursively copy directories between * different file-systems. 
*/ -public class DistCp implements Tool { - public static final Log LOG = LogFactory.getLog(DistCp.class); +public class DistCpV1 implements Tool { + public static final Log LOG = LogFactory.getLog(DistCpV1.class); private static final String NAME = "distcp"; @@ -221,7 +221,7 @@ public class DistCp implements Tool { return conf; } - public DistCp(Configuration conf) { + public DistCpV1(Configuration conf) { setConf(conf); } @@ -565,7 +565,7 @@ public class DistCp implements Tool { private void updateDestStatus(FileStatus src, FileStatus dst ) throws IOException { if (preserve_status) { - DistCp.updateDestStatus(src, dst, preseved, destFileSys); + DistCpV1.updateDestStatus(src, dst, preseved, destFileSys); } } @@ -1049,8 +1049,8 @@ public class DistCp implements Tool { } public static void main(String[] args) throws Exception { - JobConf job = new JobConf(DistCp.class); - DistCp distcp = new DistCp(job); + JobConf job = new JobConf(DistCpV1.class); + DistCpV1 distcp = new DistCpV1(job); int res = ToolRunner.run(distcp, args); System.exit(res); } @@ -1117,7 +1117,7 @@ public class DistCp implements Tool { //Job configuration private static JobConf createJobConf(Configuration conf) { - JobConf jobconf = new JobConf(conf, DistCp.class); + JobConf jobconf = new JobConf(conf, DistCpV1.class); jobconf.setJobName(conf.get("mapred.job.name", NAME)); // turn off speculative execution, because DFS doesn't handle diff --git a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java index ecd8c2dc6b0..acc88ea1c14 100644 --- a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java +++ b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java @@ -194,7 +194,7 @@ public class Logalyzer { throws IOException { String destURL = FileSystem.getDefaultUri(fsConfig) + archiveDirectory; - DistCp.copy(new JobConf(fsConfig), logListURI, destURL, null, true, false); + DistCpV1.copy(new JobConf(fsConfig), logListURI, destURL, null, true, false); } /** diff --git a/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestCopyFiles.java b/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestCopyFiles.java index 0dbfde10ed4..2f403f2176f 100644 --- a/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestCopyFiles.java +++ b/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestCopyFiles.java @@ -48,7 +48,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.MiniMRCluster; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.tools.DistCp; +import org.apache.hadoop.tools.DistCpV1; import org.apache.hadoop.util.ToolRunner; import org.apache.log4j.Level; import org.junit.Ignore; @@ -64,7 +64,7 @@ public class TestCopyFiles extends TestCase { ).getLogger().setLevel(Level.OFF); ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.OFF); ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF); - ((Log4JLogger)DistCp.LOG).getLogger().setLevel(Level.ALL); + ((Log4JLogger)DistCpV1.LOG).getLogger().setLevel(Level.ALL); } static final URI LOCAL_FS = URI.create("file:///"); @@ -267,7 +267,7 @@ public class TestCopyFiles extends TestCase { Configuration conf = new Configuration(); FileSystem localfs = FileSystem.get(LOCAL_FS, conf); MyFile[] files = createFiles(LOCAL_FS, 
TEST_ROOT_DIR+"/srcdat"); - ToolRunner.run(new DistCp(new Configuration()), + ToolRunner.run(new DistCpV1(new Configuration()), new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat", "file:///"+TEST_ROOT_DIR+"/destdat"}); assertTrue("Source and destination directories do not match.", @@ -287,7 +287,7 @@ public class TestCopyFiles extends TestCase { namenode = FileSystem.getDefaultUri(conf).toString(); if (namenode.startsWith("hdfs://")) { MyFile[] files = createFiles(URI.create(namenode), "/srcdat"); - ToolRunner.run(new DistCp(conf), new String[] { + ToolRunner.run(new DistCpV1(conf), new String[] { "-log", namenode+"/logs", namenode+"/srcdat", @@ -320,7 +320,7 @@ public class TestCopyFiles extends TestCase { FileSystem fs = FileSystem.get(URI.create(namenode), new Configuration()); fs.mkdirs(new Path("/empty")); - ToolRunner.run(new DistCp(conf), new String[] { + ToolRunner.run(new DistCpV1(conf), new String[] { "-log", namenode+"/logs", namenode+"/empty", @@ -347,7 +347,7 @@ public class TestCopyFiles extends TestCase { final String namenode = hdfs.getUri().toString(); if (namenode.startsWith("hdfs://")) { MyFile[] files = createFiles(LOCAL_FS, TEST_ROOT_DIR+"/srcdat"); - ToolRunner.run(new DistCp(conf), new String[] { + ToolRunner.run(new DistCpV1(conf), new String[] { "-log", namenode+"/logs", "file:///"+TEST_ROOT_DIR+"/srcdat", @@ -376,7 +376,7 @@ public class TestCopyFiles extends TestCase { final String namenode = FileSystem.getDefaultUri(conf).toString(); if (namenode.startsWith("hdfs://")) { MyFile[] files = createFiles(URI.create(namenode), "/srcdat"); - ToolRunner.run(new DistCp(conf), new String[] { + ToolRunner.run(new DistCpV1(conf), new String[] { "-log", "/logs", namenode+"/srcdat", @@ -403,7 +403,7 @@ public class TestCopyFiles extends TestCase { final String namenode = hdfs.getUri().toString(); if (namenode.startsWith("hdfs://")) { MyFile[] files = createFiles(URI.create(namenode), "/srcdat"); - ToolRunner.run(new DistCp(conf), new String[] { + ToolRunner.run(new DistCpV1(conf), new String[] { "-p", "-log", namenode+"/logs", @@ -420,7 +420,7 @@ public class TestCopyFiles extends TestCase { updateFiles(cluster.getFileSystem(), "/srcdat", files, nupdate); deldir(hdfs, "/logs"); - ToolRunner.run(new DistCp(conf), new String[] { + ToolRunner.run(new DistCpV1(conf), new String[] { "-prbugp", // no t to avoid preserving mod. times "-update", "-log", @@ -433,7 +433,7 @@ public class TestCopyFiles extends TestCase { checkUpdate(hdfs, dchkpoint, "/destdat", files, nupdate)); deldir(hdfs, "/logs"); - ToolRunner.run(new DistCp(conf), new String[] { + ToolRunner.run(new DistCpV1(conf), new String[] { "-prbugp", // no t to avoid preserving mod. 
times "-overwrite", "-log", @@ -483,7 +483,7 @@ public class TestCopyFiles extends TestCase { out.close(); // Run with -skipcrccheck option - ToolRunner.run(new DistCp(conf), new String[] { + ToolRunner.run(new DistCpV1(conf), new String[] { "-p", "-update", "-skipcrccheck", @@ -503,7 +503,7 @@ public class TestCopyFiles extends TestCase { deldir(hdfs, "/logs"); // Run without the option - ToolRunner.run(new DistCp(conf), new String[] { + ToolRunner.run(new DistCpV1(conf), new String[] { "-p", "-update", "-log", @@ -533,14 +533,14 @@ public class TestCopyFiles extends TestCase { final FileSystem localfs = FileSystem.get(LOCAL_FS, new Configuration()); try { MyFile[] files = createFiles(localfs, TEST_ROOT_DIR+"/srcdat"); - ToolRunner.run(new DistCp(new Configuration()), + ToolRunner.run(new DistCpV1(new Configuration()), new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat", "file:///"+TEST_ROOT_DIR+"/src2/srcdat"}); assertTrue("Source and destination directories do not match.", checkFiles(localfs, TEST_ROOT_DIR+"/src2/srcdat", files)); - assertEquals(DistCp.DuplicationException.ERROR_CODE, - ToolRunner.run(new DistCp(new Configuration()), + assertEquals(DistCpV1.DuplicationException.ERROR_CODE, + ToolRunner.run(new DistCpV1(new Configuration()), new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat", "file:///"+TEST_ROOT_DIR+"/src2/srcdat", "file:///"+TEST_ROOT_DIR+"/destdat",})); @@ -558,7 +558,7 @@ public class TestCopyFiles extends TestCase { try { MyFile[] files = {createFile(root, fs)}; //copy a dir with a single file - ToolRunner.run(new DistCp(new Configuration()), + ToolRunner.run(new DistCpV1(new Configuration()), new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat", "file:///"+TEST_ROOT_DIR+"/destdat"}); assertTrue("Source and destination directories do not match.", @@ -568,7 +568,7 @@ public class TestCopyFiles extends TestCase { String fname = files[0].getName(); Path p = new Path(root, fname); FileSystem.LOG.info("fname=" + fname + ", exists? 
" + fs.exists(p)); - ToolRunner.run(new DistCp(new Configuration()), + ToolRunner.run(new DistCpV1(new Configuration()), new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat/"+fname, "file:///"+TEST_ROOT_DIR+"/dest2/"+fname}); assertTrue("Source and destination directories do not match.", @@ -578,17 +578,17 @@ public class TestCopyFiles extends TestCase { String[] args = {"-update", "file:///"+TEST_ROOT_DIR+"/srcdat/"+fname, "file:///"+TEST_ROOT_DIR+"/dest2/"+fname}; Configuration conf = new Configuration(); - JobConf job = new JobConf(conf, DistCp.class); - DistCp.Arguments distcpArgs = DistCp.Arguments.valueOf(args, conf); + JobConf job = new JobConf(conf, DistCpV1.class); + DistCpV1.Arguments distcpArgs = DistCpV1.Arguments.valueOf(args, conf); assertFalse("Single file update failed to skip copying even though the " - + "file exists at destination.", DistCp.setup(conf, job, distcpArgs)); + + "file exists at destination.", DistCpV1.setup(conf, job, distcpArgs)); //copy single file to existing dir deldir(fs, TEST_ROOT_DIR+"/dest2"); fs.mkdirs(new Path(TEST_ROOT_DIR+"/dest2")); MyFile[] files2 = {createFile(root, fs, 0)}; String sname = files2[0].getName(); - ToolRunner.run(new DistCp(new Configuration()), + ToolRunner.run(new DistCpV1(new Configuration()), new String[] {"-update", "file:///"+TEST_ROOT_DIR+"/srcdat/"+sname, "file:///"+TEST_ROOT_DIR+"/dest2/"}); @@ -596,7 +596,7 @@ public class TestCopyFiles extends TestCase { checkFiles(fs, TEST_ROOT_DIR+"/dest2", files2)); updateFiles(fs, TEST_ROOT_DIR+"/srcdat", files2, 1); //copy single file to existing dir w/ dst name conflict - ToolRunner.run(new DistCp(new Configuration()), + ToolRunner.run(new DistCpV1(new Configuration()), new String[] {"-update", "file:///"+TEST_ROOT_DIR+"/srcdat/"+sname, "file:///"+TEST_ROOT_DIR+"/dest2/"}); @@ -621,7 +621,7 @@ public class TestCopyFiles extends TestCase { namenode = FileSystem.getDefaultUri(conf).toString(); if (namenode.startsWith("hdfs://")) { MyFile[] files = createFiles(URI.create(namenode), "/basedir/middle/srcdat"); - ToolRunner.run(new DistCp(conf), new String[] { + ToolRunner.run(new DistCpV1(conf), new String[] { "-basedir", "/basedir", namenode+"/basedir/middle/srcdat", @@ -651,7 +651,7 @@ public class TestCopyFiles extends TestCase { for(int i = 0; i < srcstat.length; i++) { fs.setOwner(srcstat[i].getPath(), "u" + i, null); } - ToolRunner.run(new DistCp(conf), + ToolRunner.run(new DistCpV1(conf), new String[]{"-pu", nnUri+"/srcdat", nnUri+"/destdat"}); assertTrue("Source and destination directories do not match.", checkFiles(fs, "/destdat", files)); @@ -670,7 +670,7 @@ public class TestCopyFiles extends TestCase { for(int i = 0; i < srcstat.length; i++) { fs.setOwner(srcstat[i].getPath(), null, "g" + i); } - ToolRunner.run(new DistCp(conf), + ToolRunner.run(new DistCpV1(conf), new String[]{"-pg", nnUri+"/srcdat", nnUri+"/destdat"}); assertTrue("Source and destination directories do not match.", checkFiles(fs, "/destdat", files)); @@ -692,7 +692,7 @@ public class TestCopyFiles extends TestCase { fs.setPermission(srcstat[i].getPath(), permissions[i]); } - ToolRunner.run(new DistCp(conf), + ToolRunner.run(new DistCpV1(conf), new String[]{"-pp", nnUri+"/srcdat", nnUri+"/destdat"}); assertTrue("Source and destination directories do not match.", checkFiles(fs, "/destdat", files)); @@ -715,7 +715,7 @@ public class TestCopyFiles extends TestCase { fs.setTimes(srcstat[i].getPath(), 40, 50); } - ToolRunner.run(new DistCp(conf), + ToolRunner.run(new DistCpV1(conf), new String[]{"-pt", 
nnUri+"/srcdat", nnUri+"/destdat"}); FileStatus[] dststat = getFileStatus(fs, "/destdat", files); @@ -753,7 +753,7 @@ public class TestCopyFiles extends TestCase { } Configuration job = mr.createJobConf(); job.setLong("distcp.bytes.per.map", totsize / 3); - ToolRunner.run(new DistCp(job), + ToolRunner.run(new DistCpV1(job), new String[] {"-m", "100", "-log", namenode+"/logs", @@ -771,7 +771,7 @@ public class TestCopyFiles extends TestCase { deldir(fs, "/destdat"); deldir(fs, "/logs"); - ToolRunner.run(new DistCp(job), + ToolRunner.run(new DistCpV1(job), new String[] {"-m", "1", "-log", namenode+"/logs", @@ -795,7 +795,7 @@ public class TestCopyFiles extends TestCase { cluster = new MiniDFSCluster(conf, 2, true, null); final String nnUri = FileSystem.getDefaultUri(conf).toString(); final FileSystem fs = FileSystem.get(URI.create(nnUri), conf); - final DistCp distcp = new DistCp(conf); + final DistCpV1 distcp = new DistCpV1(conf); final FsShell shell = new FsShell(conf); final String srcrootdir = "/src_root"; @@ -927,9 +927,9 @@ public class TestCopyFiles extends TestCase { final String srcrootdir = srcrootpath.toString(); final Path dstrootpath = new Path(home, "dst_root"); final String dstrootdir = dstrootpath.toString(); - final DistCp distcp = USER_UGI.doAs(new PrivilegedExceptionAction() { - public DistCp run() { - return new DistCp(userConf); + final DistCpV1 distcp = USER_UGI.doAs(new PrivilegedExceptionAction() { + public DistCpV1 run() { + return new DistCpV1(userConf); } }); @@ -961,7 +961,7 @@ public class TestCopyFiles extends TestCase { final String nnUri = nnURI.toString(); final FileSystem fs = FileSystem.get(URI.create(nnUri), conf); - final DistCp distcp = new DistCp(conf); + final DistCpV1 distcp = new DistCpV1(conf); final FsShell shell = new FsShell(conf); final String srcrootdir = "/src_root"; @@ -1035,7 +1035,7 @@ public class TestCopyFiles extends TestCase { MyFile[] files = createFiles(URI.create(namenode), "/srcdat"); String destdir = TEST_ROOT_DIR + "/destdat"; MyFile[] localFiles = createFiles(localfs, destdir); - ToolRunner.run(new DistCp(conf), new String[] { + ToolRunner.run(new DistCpV1(conf), new String[] { "-delete", "-update", "-log", @@ -1066,7 +1066,7 @@ public class TestCopyFiles extends TestCase { namenode = FileSystem.getDefaultUri(conf).toString(); if (namenode.startsWith("hdfs://")) { MyFile[] files = createFiles(URI.create(namenode), "/srcdat"); - ToolRunner.run(new DistCp(conf), new String[] { + ToolRunner.run(new DistCpV1(conf), new String[] { "-log", namenode+"/logs", namenode+"/srcdat/*", From 35a33067bb532f07712369a9ccbcb81516672c78 Mon Sep 17 00:00:00 2001 From: Thomas White Date: Mon, 7 May 2012 16:00:56 +0000 Subject: [PATCH 23/70] HADOOP-8328. Duplicate FileSystem Statistics object for 'file' scheme. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1335085 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../org/apache/hadoop/fs/FilterFileSystem.java | 2 +- .../org/apache/hadoop/fs/LocalFileSystem.java | 11 +++++++++++ .../org/apache/hadoop/fs/TestLocalFileSystem.java | 15 +++++++++++++++ 4 files changed, 30 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index e2c2b1a80a3..811f79f2042 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -423,6 +423,9 @@ Release 2.0.0 - UNRELEASED HADOOP-8349. 
ViewFS doesn't work when the root of a file system is mounted. (atm) + HADOOP-8328. Duplicate FileSystem Statistics object for 'file' scheme. + (tomwhite) + BREAKDOWN OF HADOOP-7454 SUBTASKS HADOOP-7455. HA: Introduce HA Service Protocol Interface. (suresh) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java index 1794c3d032f..6cbaf591e5a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java @@ -53,7 +53,7 @@ import org.apache.hadoop.util.Progressable; public class FilterFileSystem extends FileSystem { protected FileSystem fs; - private String swapScheme; + protected String swapScheme; /* * so that extending classes can define it diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java index ac9b25d972a..394c01f7054 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java @@ -39,6 +39,17 @@ public class LocalFileSystem extends ChecksumFileSystem { public LocalFileSystem() { this(new RawLocalFileSystem()); } + + @Override + public void initialize(URI name, Configuration conf) throws IOException { + if (fs.getConf() == null) { + fs.initialize(name, conf); + } + String scheme = name.getScheme(); + if (!scheme.equals(fs.getUri().getScheme())) { + swapScheme = scheme; + } + } /** * Return the protocol scheme for the FileSystem. diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java index 6ccc201c55a..604ea78d0fb 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java @@ -18,11 +18,14 @@ package org.apache.hadoop.fs; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem.Statistics; + import static org.apache.hadoop.fs.FileSystemTestHelper.*; import java.io.*; import static org.junit.Assert.*; + import org.junit.Before; import org.junit.Test; @@ -233,4 +236,16 @@ public class TestLocalFileSystem { assertTrue("Did not delete file", fs.delete(file1)); assertTrue("Did not delete non-empty dir", fs.delete(dir1)); } + + @Test + public void testStatistics() throws Exception { + FileSystem.getLocal(new Configuration()); + int fileSchemeCount = 0; + for (Statistics stats : FileSystem.getAllStatistics()) { + if (stats.getScheme().equals("file")) { + fileSchemeCount++; + } + } + assertEquals(1, fileSchemeCount); + } } From d3599de399e641b8f1ca657edd5c19dade5e6702 Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Mon, 7 May 2012 16:18:33 +0000 Subject: [PATCH 24/70] MAPREDUCE-4220. 
RM apps page starttime/endtime sorts are incorrect (Jonathan Eagles via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1335096 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 +++ .../yarn/server/resourcemanager/webapp/AppsBlock.java | 6 ++++-- .../hadoop/yarn/server/resourcemanager/webapp/AppsList.java | 6 ++++-- .../hadoop/yarn/server/resourcemanager/webapp/RmView.java | 4 ++-- 4 files changed, 13 insertions(+), 6 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 3c04711f3ed..cc03df767d5 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -458,6 +458,9 @@ Release 0.23.3 - UNRELEASED MAPREDUCE-4048. NullPointerException exception while accessing the Application Master UI (Devaraj K via bobby) + MAPREDUCE-4220. RM apps page starttime/endtime sorts are incorrect + (Jonathan Eagles via bobby) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java index 7e8fd746d01..12d5554e9cd 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java @@ -89,9 +89,11 @@ class AppsBlock extends HtmlBlock { td(appInfo.getName()). td(appInfo.getQueue()). td(). - br().$title(startTime)._()._(startTime)._(). + br().$title(String.valueOf(appInfo.getStartTime()))._(). + _(startTime)._(). td(). - br().$title(finishTime)._()._(finishTime)._(). + br().$title(String.valueOf(appInfo.getFinishTime()))._(). + _(finishTime)._(). td(appInfo.getState()). td(appInfo.getFinalStatus()). td(). diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java index 5d0060fe438..415f915cd5f 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java @@ -77,8 +77,10 @@ class AppsList implements ToJSON { append(escapeHtml(appInfo.getUser())).append(_SEP). append(escapeJavaScript(escapeHtml(appInfo.getName()))).append(_SEP). append(escapeHtml(appInfo.getQueue())).append(_SEP); - appendSortable(out, startTime).append(startTime).append(_SEP); - appendSortable(out, finishTime).append(finishTime).append(_SEP). + appendSortable(out, appInfo.getStartTime()). + append(startTime).append(_SEP); + appendSortable(out, appInfo.getFinishTime()). + append(finishTime).append(_SEP). append(appInfo.getState()).append(_SEP). 
append(appInfo.getFinalStatus()).append(_SEP); appendProgressBar(out, appInfo.getProgress()).append(_SEP); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java index cdb36f839a7..cd26955c277 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java @@ -62,10 +62,10 @@ public class RmView extends TwoColumnLayout { private String appsTableInit() { AppsList list = getInstance(AppsList.class); - // id, user, name, queue, starttime, finishtime, state, progress, ui + // id, user, name, queue, starttime, finishtime, state, status, progress, ui StringBuilder init = tableInit(). append(", aoColumns:[{sType:'title-numeric'}, null, null, null, "). - append("null, null , null, "). + append("{sType:'title-numeric'}, {sType:'title-numeric'} , null, "). append("null,{sType:'title-numeric', bSearchable:false}, null]"); // Sort by id upon page load From eca2c850a57f61e40dff734b10a2e057c5bd8cc3 Mon Sep 17 00:00:00 2001 From: Todd Lipcon Date: Mon, 7 May 2012 16:42:19 +0000 Subject: [PATCH 25/70] HDFS-3376. DFSClient fails to make connection to DN if there are many unusable cached sockets. Contributed by Todd Lipcon. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1335115 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../apache/hadoop/hdfs/DFSInputStream.java | 8 +++- .../hdfs/TestDataTransferKeepalive.java | 40 +++++++++++++++++++ 3 files changed, 50 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 73c95e14851..d3be603cfed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -606,6 +606,9 @@ Release 2.0.0 - UNRELEASED HDFS-3357. DataXceiver reads from client socket with incorrect/no timeout (todd) + HDFS-3376. DFSClient fails to make connection to DN if there are many + unusable cached sockets (todd) + BREAKDOWN OF HDFS-1623 SUBTASKS HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java index 8bbe4f37cb0..d5cd436c468 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java @@ -864,7 +864,13 @@ public class DFSInputStream extends FSInputStream implements ByteBufferReadable // Allow retry since there is no way of knowing whether the cached socket // is good until we actually use it. 
for (int retries = 0; retries <= nCachedConnRetry && fromCache; ++retries) { - Socket sock = socketCache.get(dnAddr); + Socket sock = null; + // Don't use the cache on the last attempt - it's possible that there + // are arbitrarily many unusable sockets in the cache, but we don't + // want to fail the read. + if (retries < nCachedConnRetry) { + sock = socketCache.get(dnAddr); + } if (sock == null) { fromCache = false; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java index 852f3c6801a..1ef4eac997e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java @@ -17,10 +17,12 @@ */ package org.apache.hadoop.hdfs; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY; import static org.junit.Assert.*; +import java.io.InputStream; import java.io.PrintWriter; import java.net.InetSocketAddress; import java.net.Socket; @@ -40,6 +42,8 @@ import org.junit.After; import org.junit.Before; import org.junit.Test; +import com.google.common.io.NullOutputStream; + public class TestDataTransferKeepalive { Configuration conf = new HdfsConfiguration(); private MiniDFSCluster cluster; @@ -56,6 +60,8 @@ public class TestDataTransferKeepalive { public void setup() throws Exception { conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY, KEEPALIVE_TIMEOUT); + conf.setInt(DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, + 0); cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(1).build(); @@ -143,6 +149,40 @@ public class TestDataTransferKeepalive { IOUtils.closeStream(stm); } } + + @Test(timeout=30000) + public void testManyClosedSocketsInCache() throws Exception { + // Make a small file + DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L); + + // Insert a bunch of dead sockets in the cache, by opening + // many streams concurrently, reading all of the data, + // and then closing them. + InputStream[] stms = new InputStream[5]; + try { + for (int i = 0; i < stms.length; i++) { + stms[i] = fs.open(TEST_FILE); + } + for (InputStream stm : stms) { + IOUtils.copyBytes(stm, new NullOutputStream(), 1024); + } + } finally { + IOUtils.cleanup(null, stms); + } + + DFSClient client = ((DistributedFileSystem)fs).dfs; + assertEquals(5, client.socketCache.size()); + + // Let all the xceivers timeout + Thread.sleep(1500); + assertXceiverCount(0); + + // Client side still has the sockets cached + assertEquals(5, client.socketCache.size()); + + // Reading should not throw an exception. + DFSTestUtil.readFile(fs, TEST_FILE); + } private void assertXceiverCount(int expected) { // Subtract 1, since the DataXceiverServer From 55cd161a354812b01b43f9b8e03842d3fb9954a6 Mon Sep 17 00:00:00 2001 From: Thomas White Date: Mon, 7 May 2012 19:05:59 +0000 Subject: [PATCH 26/70] MAPREDUCE-4226. ConcurrentModificationException in FileSystemCounterGroup. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1335197 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 +++ .../counters/FileSystemCounterGroup.java | 4 +++- .../apache/hadoop/mapred/TestCounters.java | 20 +++++++++++++++++++ 3 files changed, 26 insertions(+), 1 deletion(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index cc03df767d5..7c1c8f09fec 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -461,6 +461,9 @@ Release 0.23.3 - UNRELEASED MAPREDUCE-4220. RM apps page starttime/endtime sorts are incorrect (Jonathan Eagles via bobby) + MAPREDUCE-4226. ConcurrentModificationException in FileSystemCounterGroup. + (tomwhite) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java index 7c23561b659..3f3729fb056 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java @@ -23,6 +23,7 @@ import java.io.DataOutput; import java.io.IOException; import java.util.Arrays; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ConcurrentSkipListMap; import java.util.Iterator; import java.util.Locale; import java.util.Map; @@ -54,7 +55,8 @@ public abstract class FileSystemCounterGroup // C[] would need Array.newInstance which requires a Class reference. // Just a few local casts probably worth not having to carry it around. 
- private final Map map = Maps.newTreeMap(); + private final Map map = + new ConcurrentSkipListMap(); private String displayName; private static final Joiner NAME_JOINER = Joiner.on('_'); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java index f494556bac3..74a4744e157 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java @@ -18,6 +18,7 @@ package org.apache.hadoop.mapred; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.io.IOException; import java.text.ParseException; @@ -203,6 +204,25 @@ public class TestCounters { counters.incrCounter("group1", "counter2", 1); iterator.next(); } + + @Test + public void testFileSystemGroupIteratorConcurrency() { + Counters counters = new Counters(); + // create 2 filesystem counter groups + counters.findCounter("fs1", FileSystemCounter.BYTES_READ).increment(1); + counters.findCounter("fs2", FileSystemCounter.BYTES_READ).increment(1); + + // Iterate over the counters in this group while updating counters in + // the group + Group group = counters.getGroup(FileSystemCounter.class.getName()); + Iterator iterator = group.iterator(); + counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1); + assertTrue(iterator.hasNext()); + iterator.next(); + counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1); + assertTrue(iterator.hasNext()); + iterator.next(); + } public static void main(String[] args) throws IOException { new TestCounters().testCounters(); From 567aed4f2c0a3bac4ef0cd0ebd36e8672001912c Mon Sep 17 00:00:00 2001 From: Todd Lipcon Date: Mon, 7 May 2012 19:56:23 +0000 Subject: [PATCH 27/70] HDFS-3365. Enable users to disable socket caching in DFS client configuration. Contributed by Todd Lipcon. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1335222 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../org/apache/hadoop/hdfs/SocketCache.java | 13 +++++++++ .../org/apache/hadoop/hdfs/TestConnCache.java | 29 +++++++++++++++++++ 3 files changed, 45 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index d3be603cfed..474e05fc2e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -425,6 +425,9 @@ Release 2.0.0 - UNRELEASED HDFS-2617. Replaced Kerberized SSL for image transfer and fsck with SPNEGO-based solution. (jghoman, tucu, and atm via eli) + HDFS-3365. Enable users to disable socket caching in DFS client + configuration (todd) + OPTIMIZATIONS HDFS-3024. 
Improve performance of stringification in addStoredBlock (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SocketCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SocketCache.java index 508ec61ca28..36b78834ede 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SocketCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SocketCache.java @@ -47,6 +47,9 @@ class SocketCache { public SocketCache(int capacity) { multimap = LinkedListMultimap.create(); this.capacity = capacity; + if (capacity <= 0) { + LOG.debug("SocketCache disabled in configuration."); + } } /** @@ -55,6 +58,10 @@ class SocketCache { * @return A socket with unknown state, possibly closed underneath. Or null. */ public synchronized Socket get(SocketAddress remote) { + if (capacity <= 0) { // disabled + return null; + } + List socklist = multimap.get(remote); if (socklist == null) { return null; @@ -76,6 +83,12 @@ class SocketCache { * @param sock socket not used by anyone. */ public synchronized void put(Socket sock) { + if (capacity <= 0) { + // Cache disabled. + IOUtils.closeSocket(sock); + return; + } + Preconditions.checkNotNull(sock); SocketAddress remoteAddr = sock.getRemoteSocketAddress(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java index 335734d5b63..94f1dedee95 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java @@ -24,6 +24,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSClient; @@ -34,6 +35,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.token.Token; import org.junit.Test; @@ -230,6 +232,33 @@ public class TestConnCache { in.close(); } + + /** + * Test that the socket cache can be disabled by setting the capacity to + * 0. Regression test for HDFS-3365. + */ + @Test + public void testDisableCache() throws IOException { + LOG.info("Starting testDisableCache()"); + + // Reading with the normally configured filesystem should + // cache a socket. 
+ DFSTestUtil.readFile(fs, testFile); + assertEquals(1, ((DistributedFileSystem)fs).dfs.socketCache.size()); + + // Configure a new instance with no caching, ensure that it doesn't + // cache anything + Configuration confWithoutCache = new Configuration(fs.getConf()); + confWithoutCache.setInt( + DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 0); + FileSystem fsWithoutCache = FileSystem.newInstance(confWithoutCache); + try { + DFSTestUtil.readFile(fsWithoutCache, testFile); + assertEquals(0, ((DistributedFileSystem)fsWithoutCache).dfs.socketCache.size()); + } finally { + fsWithoutCache.close(); + } + } @AfterClass public static void teardownCluster() throws Exception { From 07280c9761aef3deb41b907a8a95e9ddf78d282a Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Mon, 7 May 2012 21:11:01 +0000 Subject: [PATCH 28/70] HADOOP-8359. Fix javadoc warnings in Configuration. Contributed by Anupam Seth git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1335258 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../src/main/java/org/apache/hadoop/conf/Configuration.java | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 811f79f2042..4e61a5bfd78 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -426,6 +426,9 @@ Release 2.0.0 - UNRELEASED HADOOP-8328. Duplicate FileSystem Statistics object for 'file' scheme. (tomwhite) + HADOOP-8359. Fix javadoc warnings in Configuration. (Anupam Seth via + szetszwo) + BREAKDOWN OF HADOOP-7454 SUBTASKS HADOOP-7455. HA: Introduce HA Service Protocol Interface. (suresh) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java index 044e5cb08a3..d1ef7a49fec 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java @@ -278,7 +278,7 @@ public class Configuration implements Iterable>, * @param key * @param newKeys * @param customMessage - * @deprecated use {@link addDeprecation(String key, String newKey, + * @deprecated use {@link #addDeprecation(String key, String newKey, String customMessage)} instead */ @Deprecated @@ -328,7 +328,7 @@ public class Configuration implements Iterable>, * * @param key Key that is to be deprecated * @param newKeys list of keys that take up the values of deprecated key - * @deprecated use {@link addDeprecation(String key, String newKey)} instead + * @deprecated use {@link #addDeprecation(String key, String newKey)} instead */ @Deprecated public synchronized static void addDeprecation(String key, String[] newKeys) { From 7aa2889f822a970b8b1edb8bc58aab67412877ae Mon Sep 17 00:00:00 2001 From: Todd Lipcon Date: Mon, 7 May 2012 21:34:30 +0000 Subject: [PATCH 29/70] HDFS-3375. Put client name in DataXceiver thread name for readBlock and keepalive. Contributed by Todd Lipcon. 
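A short usage sketch (not part of the patch) of the HDFS-3365 behavior exercised by TestConnCache#testDisableCache above: setting the client socket cache capacity to 0 disables caching entirely. The configuration key and the FileSystem.newInstance() pattern come straight from the test; the file path is hypothetical, and fs.defaultFS is assumed to point at a running HDFS cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class DisableSocketCacheExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // A capacity of 0 disables the DFSClient socket cache (HDFS-3365).
    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 0);
    // Use newInstance() rather than get() so a cached FileSystem created with
    // the default configuration is not silently reused.
    FileSystem fs = FileSystem.newInstance(conf);
    try {
      // Each read now opens a fresh DataNode connection instead of reusing
      // a cached socket.
      fs.open(new Path("/tmp/example.txt")).close(); // hypothetical path
    } finally {
      fs.close();
    }
  }
}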
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1335270 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../hdfs/server/datanode/DataXceiver.java | 19 ++++++++++++++++--- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 474e05fc2e6..36b363dfc58 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -428,6 +428,9 @@ Release 2.0.0 - UNRELEASED HDFS-3365. Enable users to disable socket caching in DFS client configuration (todd) + HDFS-3375. Put client name in DataXceiver thread name for readBlock + and keepalive (todd) + OPTIMIZATIONS HDFS-3024. Improve performance of stringification in addStoredBlock (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java index 4114d7f9cc7..6c280d8767d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java @@ -85,6 +85,12 @@ class DataXceiver extends Receiver implements Runnable { private long opStartTime; //the start time of receiving an Op private final SocketInputWrapper socketInputWrapper; + + /** + * Client Name used in previous operation. Not available on first request + * on the socket. + */ + private String previousOpClientName; public static DataXceiver create(Socket s, DataNode dn, DataXceiverServer dataXceiverServer) throws IOException { @@ -122,7 +128,11 @@ class DataXceiver extends Receiver implements Runnable { */ private void updateCurrentThreadName(String status) { StringBuilder sb = new StringBuilder(); - sb.append("DataXceiver for client ").append(remoteAddress); + sb.append("DataXceiver for client "); + if (previousOpClientName != null) { + sb.append(previousOpClientName).append(" at "); + } + sb.append(remoteAddress); if (status != null) { sb.append(" [").append(status).append("]"); } @@ -202,6 +212,8 @@ class DataXceiver extends Receiver implements Runnable { final String clientName, final long blockOffset, final long length) throws IOException { + previousOpClientName = clientName; + OutputStream baseStream = NetUtils.getOutputStream(s, dnConf.socketWriteTimeout); DataOutputStream out = new DataOutputStream(new BufferedOutputStream( @@ -295,7 +307,8 @@ class DataXceiver extends Receiver implements Runnable { final long maxBytesRcvd, final long latestGenerationStamp, DataChecksum requestedChecksum) throws IOException { - updateCurrentThreadName("Receiving block " + block + " client=" + clientname); + previousOpClientName = clientname; + updateCurrentThreadName("Receiving block " + block); final boolean isDatanode = clientname.length() == 0; final boolean isClient = !isDatanode; final boolean isTransfer = stage == BlockConstructionStage.TRANSFER_RBW @@ -502,7 +515,7 @@ class DataXceiver extends Receiver implements Runnable { final DatanodeInfo[] targets) throws IOException { checkAccess(null, true, blk, blockToken, Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY); - + previousOpClientName = clientName; updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk); final DataOutputStream out = new DataOutputStream( From f0f9a3631fe4950f5cf548f192226836925d0f05 Mon Sep 17 00:00:00 
2001 From: Tsz-wo Sze Date: Tue, 8 May 2012 00:06:09 +0000 Subject: [PATCH 30/70] HDFS-3363. Define BlockCollection and MutableBlockCollection interfaces so that INodeFile and INodeFileUnderConstruction do not have to be used in block management. Contributed by John George git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1335304 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 4 ++ .../blockmanagement/BlockCollection.java | 63 +++++++++++++++++++ .../server/blockmanagement/BlockInfo.java | 9 ++- .../BlockInfoUnderConstruction.java | 2 +- .../server/blockmanagement/BlockManager.java | 59 +++++++++-------- .../blockmanagement/BlockPlacementPolicy.java | 7 +-- .../BlockPlacementPolicyDefault.java | 3 +- .../server/blockmanagement/BlocksMap.java | 5 +- .../MutableBlockCollection.java | 44 +++++++++++++ .../hdfs/server/namenode/FSInodeInfo.java | 38 ----------- .../hdfs/server/namenode/FSNamesystem.java | 6 +- .../hadoop/hdfs/server/namenode/INode.java | 3 +- .../hdfs/server/namenode/INodeFile.java | 11 +++- .../namenode/INodeFileUnderConstruction.java | 4 +- .../server/namenode/NamenodeJspHelper.java | 2 +- .../server/namenode/ha/TestDNFencing.java | 4 +- 16 files changed, 170 insertions(+), 94 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/MutableBlockCollection.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 36b363dfc58..165ec817492 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -431,6 +431,10 @@ Release 2.0.0 - UNRELEASED HDFS-3375. Put client name in DataXceiver thread name for readBlock and keepalive (todd) + HDFS-3363. Define BlockCollection and MutableBlockCollection interfaces + so that INodeFile and INodeFileUnderConstruction do not have to be used in + block management. (John George via szetszwo) + OPTIMIZATIONS HDFS-3024. Improve performance of stringification in addStoredBlock (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java new file mode 100644 index 00000000000..f7c33cad011 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.blockmanagement; + +import java.io.IOException; + +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; +import org.apache.hadoop.fs.ContentSummary; + +/** + * This interface is used by the block manager to expose a + * few characteristics of a collection of Block/BlockUnderConstruction. + */ +public interface BlockCollection { + /** + * Get the last block of the collection. + * Make sure it has the right type. + */ + public T getLastBlock() throws IOException; + + /** + * Get content summary. + */ + public ContentSummary computeContentSummary(); + + /** @return the number of blocks */ + public int numBlocks(); + + public BlockInfo[] getBlocks(); + /** + * Get preferred block size for the collection + * @return preferred block size in bytes + */ + public long getPreferredBlockSize(); + + /** + * Get block replication for the collection + * @return block replication value + */ + public short getReplication(); + + /** + * Get name of collection. + */ + public String getName(); +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java index ce3ff8b3ed3..44089478f3b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java @@ -22,18 +22,17 @@ import java.util.LinkedList; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; -import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.util.LightWeightGSet; /** * BlockInfo class maintains for a given block - * the {@link INodeFile} it is part of and datanodes where the replicas of + * the {@link BlockCollection} it is part of and datanodes where the replicas of * the block are stored. 
*/ @InterfaceAudience.Private public class BlockInfo extends Block implements LightWeightGSet.LinkedElement { - private INodeFile inode; + private BlockCollection inode; /** For implementing {@link LightWeightGSet.LinkedElement} interface */ private LightWeightGSet.LinkedElement nextLinkedElement; @@ -77,11 +76,11 @@ public class BlockInfo extends Block implements this.inode = from.inode; } - public INodeFile getINode() { + public BlockCollection getINode() { return inode; } - public void setINode(INodeFile inode) { + public void setINode(BlockCollection inode) { this.inode = inode; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java index 6509f3d7fa9..5c0db1bb9b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java @@ -234,7 +234,7 @@ public class BlockInfoUnderConstruction extends BlockInfo { blockRecoveryId = recoveryId; if (replicas.size() == 0) { NameNode.stateChangeLog.warn("BLOCK*" - + " INodeFileUnderConstruction.initLeaseRecovery:" + + " BlockInfoUnderConstruction.initLeaseRecovery:" + " No blocks found, lease removed."); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index a8121064139..a3c2761eaa6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -55,8 +55,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.common.Util; import org.apache.hadoop.hdfs.server.namenode.FSClusterStats; -import org.apache.hadoop.hdfs.server.namenode.INodeFile; -import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.Namesystem; import org.apache.hadoop.hdfs.server.protocol.BlockCommand; @@ -384,7 +382,7 @@ public class BlockManager { numReplicas.decommissionedReplicas(); if (block instanceof BlockInfo) { - String fileName = ((BlockInfo)block).getINode().getFullPathName(); + String fileName = ((BlockInfo)block).getINode().getName(); out.print(fileName + ": "); } // l: == live:, d: == decommissioned c: == corrupt e: == excess @@ -460,7 +458,7 @@ public class BlockManager { * @throws IOException if the block does not have at least a minimal number * of replicas reported from data-nodes. 
*/ - public boolean commitOrCompleteLastBlock(INodeFileUnderConstruction fileINode, + public boolean commitOrCompleteLastBlock(MutableBlockCollection fileINode, Block commitBlock) throws IOException { if(commitBlock == null) return false; // not committing, this is a block allocation retry @@ -472,7 +470,7 @@ public class BlockManager { final boolean b = commitBlock((BlockInfoUnderConstruction)lastBlock, commitBlock); if(countNodes(lastBlock).liveReplicas() >= minReplication) - completeBlock(fileINode,fileINode.numBlocks()-1, false); + completeBlock(fileINode, fileINode.numBlocks()-1, false); return b; } @@ -483,7 +481,7 @@ public class BlockManager { * @throws IOException if the block does not have at least a minimal number * of replicas reported from data-nodes. */ - private BlockInfo completeBlock(final INodeFile fileINode, + private BlockInfo completeBlock(final MutableBlockCollection fileINode, final int blkIndex, boolean force) throws IOException { if(blkIndex < 0) return null; @@ -516,7 +514,7 @@ public class BlockManager { return blocksMap.replaceBlock(completeBlock); } - private BlockInfo completeBlock(final INodeFile fileINode, + private BlockInfo completeBlock(final MutableBlockCollection fileINode, final BlockInfo block, boolean force) throws IOException { BlockInfo[] fileBlocks = fileINode.getBlocks(); for(int idx = 0; idx < fileBlocks.length; idx++) @@ -531,7 +529,7 @@ public class BlockManager { * regardless of whether enough replicas are present. This is necessary * when tailing edit logs as a Standby. */ - public BlockInfo forceCompleteBlock(final INodeFile fileINode, + public BlockInfo forceCompleteBlock(final MutableBlockCollection fileINode, final BlockInfoUnderConstruction block) throws IOException { block.commitBlock(block); return completeBlock(fileINode, block, true); @@ -552,7 +550,7 @@ public class BlockManager { * @return the last block locations if the block is partial or null otherwise */ public LocatedBlock convertLastBlockToUnderConstruction( - INodeFileUnderConstruction fileINode) throws IOException { + MutableBlockCollection fileINode) throws IOException { BlockInfo oldBlock = fileINode.getLastBlock(); if(oldBlock == null || fileINode.getPreferredBlockSize() == oldBlock.getNumBytes()) @@ -923,7 +921,7 @@ public class BlockManager { " does not exist. 
"); } - INodeFile inode = storedBlock.getINode(); + BlockCollection inode = storedBlock.getINode(); if (inode == null) { NameNode.stateChangeLog.info("BLOCK markBlockAsCorrupt: " + "block " + storedBlock + @@ -1051,7 +1049,7 @@ public class BlockManager { int requiredReplication, numEffectiveReplicas; List containingNodes, liveReplicaNodes; DatanodeDescriptor srcNode; - INodeFile fileINode = null; + BlockCollection fileINode = null; int additionalReplRequired; int scheduledWork = 0; @@ -1065,7 +1063,7 @@ public class BlockManager { // block should belong to a file fileINode = blocksMap.getINode(block); // abandoned block or block reopened for append - if(fileINode == null || fileINode.isUnderConstruction()) { + if(fileINode == null || fileINode instanceof MutableBlockCollection) { neededReplications.remove(block, priority); // remove from neededReplications neededReplications.decrementReplicationIndex(priority); continue; @@ -1151,7 +1149,7 @@ public class BlockManager { // block should belong to a file fileINode = blocksMap.getINode(block); // abandoned block or block reopened for append - if(fileINode == null || fileINode.isUnderConstruction()) { + if(fileINode == null || fileINode instanceof MutableBlockCollection) { neededReplications.remove(block, priority); // remove from neededReplications rw.targets = null; neededReplications.decrementReplicationIndex(priority); @@ -1916,7 +1914,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block int numCurrentReplica = countLiveNodes(storedBlock); if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED && numCurrentReplica >= minReplication) { - completeBlock(storedBlock.getINode(), storedBlock, false); + completeBlock((MutableBlockCollection)storedBlock.getINode(), storedBlock, false); } else if (storedBlock.isComplete()) { // check whether safe replication is reached for the block // only complete blocks are counted towards that. @@ -1954,7 +1952,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block return block; } assert storedBlock != null : "Block must be stored by now"; - INodeFile fileINode = storedBlock.getINode(); + BlockCollection fileINode = storedBlock.getINode(); assert fileINode != null : "Block must belong to a file"; // add block to the datanode @@ -1981,7 +1979,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block if(storedBlock.getBlockUCState() == BlockUCState.COMMITTED && numLiveReplicas >= minReplication) { - storedBlock = completeBlock(fileINode, storedBlock, false); + storedBlock = completeBlock((MutableBlockCollection)fileINode, storedBlock, false); } else if (storedBlock.isComplete()) { // check whether safe replication is reached for the block // only complete blocks are counted towards that @@ -1992,7 +1990,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block } // if file is under construction, then done for now - if (fileINode.isUnderConstruction()) { + if (fileINode instanceof MutableBlockCollection) { return storedBlock; } @@ -2129,7 +2127,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block * what happened with it. 
*/ private MisReplicationResult processMisReplicatedBlock(BlockInfo block) { - INodeFile fileINode = block.getINode(); + BlockCollection fileINode = block.getINode(); if (fileINode == null) { // block does not belong to any file addToInvalidates(block); @@ -2258,7 +2256,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block BlockPlacementPolicy replicator) { assert namesystem.hasWriteLock(); // first form a rack to datanodes map and - INodeFile inode = getINode(b); + BlockCollection inode = getINode(b); final Map> rackMap = new HashMap>(); for(final Iterator iter = nonExcess.iterator(); @@ -2379,7 +2377,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block // necessary. In that case, put block on a possibly-will- // be-replicated list. // - INodeFile fileINode = blocksMap.getINode(block); + BlockCollection fileINode = blocksMap.getINode(block); if (fileINode != null) { namesystem.decrementSafeBlockCount(block); updateNeededReplications(block, -1, 0); @@ -2611,7 +2609,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block NumberReplicas num) { int curReplicas = num.liveReplicas(); int curExpectedReplicas = getReplication(block); - INodeFile fileINode = blocksMap.getINode(block); + BlockCollection fileINode = blocksMap.getINode(block); Iterator nodeIter = blocksMap.nodeIterator(block); StringBuilder nodeList = new StringBuilder(); while (nodeIter.hasNext()) { @@ -2624,7 +2622,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block + ", corrupt replicas: " + num.corruptReplicas() + ", decommissioned replicas: " + num.decommissionedReplicas() + ", excess replicas: " + num.excessReplicas() - + ", Is Open File: " + fileINode.isUnderConstruction() + + ", Is Open File: " + (fileINode instanceof MutableBlockCollection) + ", Datanodes having this block: " + nodeList + ", Current Datanode: " + srcNode + ", Is current datanode decommissioning: " + srcNode.isDecommissionInProgress()); @@ -2639,7 +2637,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block final Iterator it = srcNode.getBlockIterator(); while(it.hasNext()) { final Block block = it.next(); - INodeFile fileINode = blocksMap.getINode(block); + BlockCollection fileINode = blocksMap.getINode(block); short expectedReplication = fileINode.getReplication(); NumberReplicas num = countNodes(block); int numCurrentReplica = num.liveReplicas(); @@ -2662,7 +2660,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block final Iterator it = srcNode.getBlockIterator(); while(it.hasNext()) { final Block block = it.next(); - INodeFile fileINode = blocksMap.getINode(block); + BlockCollection fileINode = blocksMap.getINode(block); if (fileINode != null) { NumberReplicas num = countNodes(block); @@ -2679,7 +2677,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block if ((curReplicas == 0) && (num.decommissionedReplicas() > 0)) { decommissionOnlyReplicas++; } - if (fileINode.isUnderConstruction()) { + if (fileINode instanceof MutableBlockCollection) { underReplicatedInOpenFiles++; } } @@ -2782,11 +2780,10 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block /* get replication factor of a block */ private int getReplication(Block block) { - INodeFile fileINode = blocksMap.getINode(block); + BlockCollection fileINode = blocksMap.getINode(block); if (fileINode == null) { // block does not belong to any file return 0; } - assert !fileINode.isDirectory() : "Block cannot belong to a directory."; return fileINode.getReplication(); } @@ -2859,11 +2856,11 @@ assert storedBlock.findDatanode(dn) < 0 : 
"Block " + block return this.neededReplications.getCorruptBlockSize(); } - public BlockInfo addINode(BlockInfo block, INodeFile iNode) { + public BlockInfo addINode(BlockInfo block, BlockCollection iNode) { return blocksMap.addINode(block, iNode); } - public INodeFile getINode(Block b) { + public BlockCollection getINode(Block b) { return blocksMap.getINode(b); } @@ -3003,7 +3000,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block private static class ReplicationWork { private Block block; - private INodeFile fileINode; + private BlockCollection fileINode; private DatanodeDescriptor srcNode; private List containingNodes; @@ -3014,7 +3011,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block private int priority; public ReplicationWork(Block block, - INodeFile fileINode, + BlockCollection fileINode, DatanodeDescriptor srcNode, List containingNodes, List liveReplicaNodes, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java index b333972a262..b07d70ca546 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java @@ -29,7 +29,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.namenode.FSClusterStats; -import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.Node; import org.apache.hadoop.util.ReflectionUtils; @@ -123,13 +122,13 @@ public abstract class BlockPlacementPolicy { * @return array of DatanodeDescriptor instances chosen as target * and sorted as a pipeline. */ - DatanodeDescriptor[] chooseTarget(FSInodeInfo srcInode, + DatanodeDescriptor[] chooseTarget(BlockCollection srcInode, int numOfReplicas, DatanodeDescriptor writer, List chosenNodes, HashMap excludedNodes, long blocksize) { - return chooseTarget(srcInode.getFullPathName(), numOfReplicas, writer, + return chooseTarget(srcInode.getName(), numOfReplicas, writer, chosenNodes, excludedNodes, blocksize); } @@ -159,7 +158,7 @@ public abstract class BlockPlacementPolicy { listed in the previous parameter. 
* @return the replica that is the best candidate for deletion */ - abstract public DatanodeDescriptor chooseReplicaToDelete(FSInodeInfo srcInode, + abstract public DatanodeDescriptor chooseReplicaToDelete(BlockCollection srcInode, Block block, short replicationFactor, Collection existingReplicas, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java index 058d2e37aaa..da25213cd98 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java @@ -33,7 +33,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.namenode.FSClusterStats; -import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.Node; import org.apache.hadoop.net.NodeBase; @@ -547,7 +546,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy { } @Override - public DatanodeDescriptor chooseReplicaToDelete(FSInodeInfo inode, + public DatanodeDescriptor chooseReplicaToDelete(BlockCollection inode, Block block, short replicationFactor, Collection first, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java index e479954d42f..71fc5443c9f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.blockmanagement; import java.util.Iterator; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.util.GSet; import org.apache.hadoop.hdfs.util.LightWeightGSet; @@ -93,7 +92,7 @@ class BlocksMap { blocks = null; } - INodeFile getINode(Block b) { + BlockCollection getINode(Block b) { BlockInfo info = blocks.get(b); return (info != null) ? info.getINode() : null; } @@ -101,7 +100,7 @@ class BlocksMap { /** * Add block b belonging to the specified file inode to the map. */ - BlockInfo addINode(BlockInfo b, INodeFile iNode) { + BlockInfo addINode(BlockInfo b, BlockCollection iNode) { BlockInfo info = blocks.get(b); if (info != b) { info = b; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/MutableBlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/MutableBlockCollection.java new file mode 100644 index 00000000000..2b5b3e4dd27 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/MutableBlockCollection.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.blockmanagement; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; +import org.apache.hadoop.fs.ContentSummary; + +/** + * This interface is used by the block manager to expose a + * few characteristics of a collection of Block/BlockUnderConstruction. + */ +public interface MutableBlockCollection extends BlockCollection { + /** + * Set block + */ + public void setBlock(int idx, BlockInfo blk); + + /** + * Convert the last block of the collection to an under-construction block. + * Set its locations. + */ + public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock, + DatanodeDescriptor[] targets) throws IOException; +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSInodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSInodeInfo.java index 40a474aba77..e69de29bb2d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSInodeInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSInodeInfo.java @@ -1,38 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdfs.server.namenode; - -import org.apache.hadoop.classification.InterfaceAudience; - -/** - * This interface is used used the pluggable block placement policy - * to expose a few characteristics of an Inode. 
- */ -@InterfaceAudience.Private -public interface FSInodeInfo { - - /** - * a string representation of an inode - * - * @return the full pathname (from root) that this inode represents - */ - - public String getFullPathName() ; -} - - diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 35800c16631..62bc1c3ba7e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -2840,7 +2840,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, if (storedBlock == null) { throw new IOException("Block (=" + lastblock + ") not found"); } - INodeFile iFile = storedBlock.getINode(); + INodeFile iFile = (INodeFile) storedBlock.getINode(); if (!iFile.isUnderConstruction() || storedBlock.isComplete()) { throw new IOException("Unexpected block (=" + lastblock + ") since the file (=" + iFile.getLocalName() @@ -4394,7 +4394,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, } // check file inode - INodeFile file = storedBlock.getINode(); + INodeFile file = (INodeFile) storedBlock.getINode(); if (file==null || !file.isUnderConstruction()) { throw new IOException("The file " + storedBlock + " belonged to does not exist or it is not under construction."); @@ -4706,7 +4706,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, while (blkIterator.hasNext()) { Block blk = blkIterator.next(); - INode inode = blockManager.getINode(blk); + INode inode = (INodeFile) blockManager.getINode(blk); skip++; if (inode != null && blockManager.countNodes(blk).liveReplicas() == 0) { String src = FSDirectory.getFullPathName(inode); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java index 2bc049dbfa7..e940b61ab9c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java @@ -38,7 +38,7 @@ import com.google.common.primitives.SignedBytes; * directory inodes. */ @InterfaceAudience.Private -abstract class INode implements Comparable, FSInodeInfo { +abstract class INode implements Comparable { /* * The inode name is in java UTF8 encoding; * The name in HdfsFileStatus should keep the same encoding as this. @@ -264,7 +264,6 @@ abstract class INode implements Comparable, FSInodeInfo { this.name = name; } - @Override public String getFullPathName() { // Get the full path name of this inode. 
return FSDirectory.getFullPathName(this); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index 2b3a0125fe5..55cb68835bf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -20,15 +20,18 @@ package org.apache.hadoop.hdfs.server.namenode; import java.io.IOException; import java.util.List; +import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection; /** I-node for closed file. */ -public class INodeFile extends INode { +@InterfaceAudience.Private +public class INodeFile extends INode implements BlockCollection { static final FsPermission UMASK = FsPermission.createImmutable((short)0111); //Number of bits for Block size @@ -167,6 +170,12 @@ public class INodeFile extends INode { blocks = null; return 1; } + + public String getName() { + // Get the full path name of this inode. + return getFullPathName(); + } + @Override long[] computeContentSummary(long[] summary) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java index c5c47fd6461..03b0fbd2169 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java @@ -25,13 +25,15 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; +import org.apache.hadoop.hdfs.server.blockmanagement.MutableBlockCollection; import com.google.common.base.Joiner; /** * I-node for file being written. */ -public class INodeFileUnderConstruction extends INodeFile { +public class INodeFileUnderConstruction extends INodeFile + implements MutableBlockCollection { private String clientName; // lease holder private final String clientMachine; private final DatanodeDescriptor clientNode; // if client is a cluster node too. 
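An illustrative sketch (not part of the patch) of what the HDFS-3363 refactoring above enables: block-management code can reason about files purely through the BlockCollection interface, using an instanceof check against MutableBlockCollection where it previously needed INodeFile#isUnderConstruction(). Only methods declared by the new interfaces are used; the helper class name is hypothetical.

import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
import org.apache.hadoop.hdfs.server.blockmanagement.MutableBlockCollection;

public class BlockCollectionExample {
  /** Mirrors the "fileINode instanceof MutableBlockCollection" checks in BlockManager above. */
  static boolean isUnderConstruction(BlockCollection bc) {
    return bc instanceof MutableBlockCollection;
  }

  /** Summarizes a collection using only the interface methods it exposes. */
  static String describe(BlockCollection bc) {
    return bc.getName() + ": " + bc.numBlocks() + " block(s), replication="
        + bc.getReplication() + ", underConstruction=" + isUnderConstruction(bc);
  }
}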
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java index 2dfa59751ff..b4e48227cf6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java @@ -734,7 +734,7 @@ class NamenodeJspHelper { this.inode = null; } else { this.block = new Block(blockId); - this.inode = blockManager.getINode(block); + this.inode = (INodeFile) blockManager.getINode(block); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java index 5e657ded489..2a144b88c9c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java @@ -46,9 +46,9 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; -import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; @@ -585,7 +585,7 @@ public class TestDNFencing { } @Override - public DatanodeDescriptor chooseReplicaToDelete(FSInodeInfo inode, + public DatanodeDescriptor chooseReplicaToDelete(BlockCollection inode, Block block, short replicationFactor, Collection first, Collection second) { From 1d7b9805924a9006571ceca04924adee517ddffa Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Tue, 8 May 2012 00:25:34 +0000 Subject: [PATCH 31/70] HDFS-3378. Remove DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY and DEFAULT. Contributed by Eli Collins git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1335309 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java | 2 -- .../src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java | 1 - 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 165ec817492..449ff1ff5da 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -448,6 +448,8 @@ Release 2.0.0 - UNRELEASED HDFS-2476. More CPU efficient data structure for under-replicated, over-replicated, and invalidated blocks. (Tomasz Nykiel via todd) + HDFS-3378. Remove DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY and DEFAULT. (eli) + BUG FIXES HDFS-2481. Unknown protocol: org.apache.hadoop.hdfs.protocol.ClientProtocol. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 41d4960d7e9..2843d457f83 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -99,8 +99,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final int DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT = 0; public static final String DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY = "dfs.namenode.secondary.http-address"; public static final String DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50090"; - public static final String DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY = "dfs.namenode.secondary.https-port"; - public static final int DFS_NAMENODE_SECONDARY_HTTPS_PORT_DEFAULT = 50490; public static final String DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY = "dfs.namenode.checkpoint.check.period"; public static final long DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_DEFAULT = 60; public static final String DFS_NAMENODE_CHECKPOINT_PERIOD_KEY = "dfs.namenode.checkpoint.period"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java index 621dde03801..1454fdbd6f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java @@ -81,7 +81,6 @@ public class HdfsConfiguration extends Configuration { deprecate("dfs.safemode.extension", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY); deprecate("dfs.safemode.threshold.pct", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY); deprecate("dfs.secondary.http.address", DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY); - deprecate("dfs.secondary.https.port", DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY); deprecate("dfs.socket.timeout", DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY); deprecate("fs.checkpoint.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY); deprecate("fs.checkpoint.edits.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY); From a9808de0d9a73a99c10a3e4290ec20778fed4f24 Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Tue, 8 May 2012 13:20:56 +0000 Subject: [PATCH 32/70] HADOOP-8341. 
Fix or filter findbugs issues in hadoop-tools (bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1335505 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 2 ++ .../apache/hadoop/tools/HadoopArchives.java | 2 +- .../java/org/apache/hadoop/tools/DistCp.java | 9 ++++-- .../org/apache/hadoop/tools/Logalyzer.java | 4 +-- .../dev-support/findbugs-exclude.xml | 31 +++++++++++++++++++ hadoop-tools/hadoop-rumen/pom.xml | 10 ++++++ .../tools/rumen/DeskewedJobTraceReader.java | 4 ++- .../tools/rumen/JobConfPropertyNames.java | 4 ++- .../tools/rumen/LoggedNetworkTopology.java | 4 ++- .../hadoop/tools/rumen/TraceBuilder.java | 3 +- .../WordListAnonymizerUtility.java | 2 +- .../tools/rumen/datatypes/NodeName.java | 12 ++----- .../dev-support/findbugs-exclude.xml | 30 ++++++++++++++++++ hadoop-tools/hadoop-streaming/pom.xml | 10 ++++++ .../apache/hadoop/streaming/StreamJob.java | 14 +++++---- 15 files changed, 114 insertions(+), 27 deletions(-) create mode 100644 hadoop-tools/hadoop-rumen/dev-support/findbugs-exclude.xml create mode 100644 hadoop-tools/hadoop-streaming/dev-support/findbugs-exclude.xml diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 4e61a5bfd78..0cc39a58b4a 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -545,6 +545,8 @@ Release 0.23.3 - UNRELEASED HADOOP-8327. distcpv2 and distcpv1 jars should not coexist (Dave Thompson via bobby) + HADOOP-8341. Fix or filter findbugs issues in hadoop-tools (bobby) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java index 8a370a5eb49..98b3c9ca1e9 100644 --- a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java +++ b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java @@ -117,7 +117,7 @@ public class HadoopArchives implements Tool { // will when running the mapreduce job. String testJar = System.getProperty(TEST_HADOOP_ARCHIVES_JAR_PATH, null); if (testJar != null) { - ((JobConf)conf).setJar(testJar); + this.conf.setJar(testJar); } } diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java index 05a581f43fc..523609b5327 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java @@ -136,10 +136,13 @@ public class DistCp extends Configured implements Tool { Job job = null; try { - metaFolder = createMetaFolderPath(); - jobFS = metaFolder.getFileSystem(getConf()); + synchronized(this) { + //Don't cleanup while we are setting up. 
+ metaFolder = createMetaFolderPath(); + jobFS = metaFolder.getFileSystem(getConf()); - job = createJob(); + job = createJob(); + } createInputFileListing(job); job.submit(); diff --git a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java index acc88ea1c14..4e7ac35b873 100644 --- a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java +++ b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java @@ -65,9 +65,9 @@ import org.apache.hadoop.mapreduce.lib.map.RegexMapper; public class Logalyzer { // Constants private static Configuration fsConfig = new Configuration(); - public static String SORT_COLUMNS = + public static final String SORT_COLUMNS = "logalizer.logcomparator.sort.columns"; - public static String COLUMN_SEPARATOR = + public static final String COLUMN_SEPARATOR = "logalizer.logcomparator.column.separator"; static { diff --git a/hadoop-tools/hadoop-rumen/dev-support/findbugs-exclude.xml b/hadoop-tools/hadoop-rumen/dev-support/findbugs-exclude.xml new file mode 100644 index 00000000000..93e48194869 --- /dev/null +++ b/hadoop-tools/hadoop-rumen/dev-support/findbugs-exclude.xml @@ -0,0 +1,31 @@ + + + + + + + + + + + + + + + + diff --git a/hadoop-tools/hadoop-rumen/pom.xml b/hadoop-tools/hadoop-rumen/pom.xml index 0c3d2257382..8f5fddd577f 100644 --- a/hadoop-tools/hadoop-rumen/pom.xml +++ b/hadoop-tools/hadoop-rumen/pom.xml @@ -90,6 +90,16 @@ + + org.codehaus.mojo + findbugs-maven-plugin + + true + true + ${basedir}/dev-support/findbugs-exclude.xml + Max + + org.apache.maven.plugins maven-antrun-plugin diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/DeskewedJobTraceReader.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/DeskewedJobTraceReader.java index ab6220ea84a..e651ae8e05e 100644 --- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/DeskewedJobTraceReader.java +++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/DeskewedJobTraceReader.java @@ -20,6 +20,7 @@ package org.apache.hadoop.tools.rumen; import java.io.Closeable; import java.io.IOException; +import java.io.Serializable; import java.util.Comparator; import java.util.Iterator; import java.util.PriorityQueue; @@ -59,7 +60,8 @@ public class DeskewedJobTraceReader implements Closeable { static final private Log LOG = LogFactory.getLog(DeskewedJobTraceReader.class); - static private class JobComparator implements Comparator { + static private class JobComparator implements Comparator, + Serializable { @Override public int compare(LoggedJob j1, LoggedJob j2) { return (j1.getSubmitTime() < j2.getSubmitTime()) ? 
-1 : (j1 diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JobConfPropertyNames.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JobConfPropertyNames.java index 3e60a3627ed..c1ba83ece49 100644 --- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JobConfPropertyNames.java +++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JobConfPropertyNames.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.tools.rumen; +import java.util.Arrays; + import org.apache.hadoop.mapreduce.MRJobConfig; public enum JobConfPropertyNames { @@ -33,6 +35,6 @@ public enum JobConfPropertyNames { } public String[] getCandidates() { - return candidates; + return Arrays.copyOf(candidates, candidates.length); } } diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedNetworkTopology.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedNetworkTopology.java index 3d9caf0b352..23bbb98bb00 100644 --- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedNetworkTopology.java +++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedNetworkTopology.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.tools.rumen; +import java.io.Serializable; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -68,7 +69,8 @@ public class LoggedNetworkTopology implements DeepCompare { * order. * */ - static class TopoSort implements Comparator { + static class TopoSort implements Comparator, + Serializable { public int compare(LoggedNetworkTopology t1, LoggedNetworkTopology t2) { return t1.name.getValue().compareTo(t2.name.getValue()); } diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/TraceBuilder.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/TraceBuilder.java index 9a35e84284c..2fb52931232 100644 --- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/TraceBuilder.java +++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/TraceBuilder.java @@ -20,6 +20,7 @@ package org.apache.hadoop.tools.rumen; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; +import java.io.Serializable; import java.util.ArrayList; import java.util.Arrays; import java.util.Comparator; @@ -98,7 +99,7 @@ public class TraceBuilder extends Configured implements Tool { * history file names should result in the order of jobs' submission times. */ private static class HistoryLogsComparator - implements Comparator { + implements Comparator, Serializable { @Override public int compare(FileStatus file1, FileStatus file2) { return file1.getPath().getName().compareTo( diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/WordListAnonymizerUtility.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/WordListAnonymizerUtility.java index de3fa9c3760..58566268187 100644 --- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/WordListAnonymizerUtility.java +++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/WordListAnonymizerUtility.java @@ -27,7 +27,7 @@ import org.apache.commons.lang.StringUtils; * //TODO There is no caching for saving memory. 
*/ public class WordListAnonymizerUtility { - public static final String[] KNOWN_WORDS = + static final String[] KNOWN_WORDS = new String[] {"job", "tmp", "temp", "home", "homes", "usr", "user", "test"}; /** diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/NodeName.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/NodeName.java index 862ed5b829d..c0b8d45cf75 100644 --- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/NodeName.java +++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/NodeName.java @@ -93,16 +93,8 @@ public class NodeName implements AnonymizableDataType { } public NodeName(String rName, String hName) { - rName = (rName == null) - ? rName - : rName.length() == 0 - ? null - : rName; - hName = (hName == null) - ? hName - : hName.length() == 0 - ? null - : hName; + rName = (rName == null || rName.length() == 0) ? null : rName; + hName = (hName == null || hName.length() == 0) ? null : hName; if (hName == null) { nodeName = rName; rackName = rName; diff --git a/hadoop-tools/hadoop-streaming/dev-support/findbugs-exclude.xml b/hadoop-tools/hadoop-streaming/dev-support/findbugs-exclude.xml new file mode 100644 index 00000000000..122c75bbb18 --- /dev/null +++ b/hadoop-tools/hadoop-streaming/dev-support/findbugs-exclude.xml @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + diff --git a/hadoop-tools/hadoop-streaming/pom.xml b/hadoop-tools/hadoop-streaming/pom.xml index c19872e6303..0ab9f6ccfb1 100644 --- a/hadoop-tools/hadoop-streaming/pom.xml +++ b/hadoop-tools/hadoop-streaming/pom.xml @@ -96,6 +96,16 @@ + + org.codehaus.mojo + findbugs-maven-plugin + + true + true + ${basedir}/dev-support/findbugs-exclude.xml + Max + + org.apache.maven.plugins maven-antrun-plugin diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java index 39ba36d1790..b014cde6225 100644 --- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java +++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java @@ -91,7 +91,7 @@ public class StreamJob implements Tool { @Deprecated public StreamJob(String[] argv, boolean mayExit) { this(); - argv_ = argv; + argv_ = Arrays.copyOf(argv, argv.length); this.config_ = new Configuration(); } @@ -113,7 +113,7 @@ public class StreamJob implements Tool { @Override public int run(String[] args) throws Exception { try { - this.argv_ = args; + this.argv_ = Arrays.copyOf(args, args.length); init(); preProcessArgs(); @@ -290,7 +290,7 @@ public class StreamJob implements Tool { LOG.warn("-file option is deprecated, please use generic option" + " -files instead."); - String fileList = null; + StringBuffer fileList = new StringBuffer(); for (String file : values) { packageFiles_.add(file); try { @@ -298,13 +298,15 @@ public class StreamJob implements Tool { Path path = new Path(pathURI); FileSystem localFs = FileSystem.getLocal(config_); String finalPath = path.makeQualified(localFs).toString(); - fileList = fileList == null ? finalPath : fileList + "," + finalPath; + if(fileList.length() > 0) { + fileList.append(','); + } + fileList.append(finalPath); } catch (Exception e) { throw new IllegalArgumentException(e); } } - config_.set("tmpfiles", config_.get("tmpfiles", "") + - (fileList == null ? 
"" : fileList)); + config_.set("tmpfiles", config_.get("tmpfiles", "") + fileList); validate(packageFiles_); } From aa60da6c2ec049cc70897afee6c368cb70493773 Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Tue, 8 May 2012 15:07:40 +0000 Subject: [PATCH 33/70] MAPREDUCE-4162. Correctly set token service (Daryn Sharp via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1335567 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 2 ++ .../org/apache/hadoop/mapred/YarnChild.java | 8 +++-- .../v2/app/client/MRClientService.java | 5 +++ .../app/launcher/ContainerLauncherImpl.java | 14 ++++---- .../mapreduce/v2/app/rm/RMCommunicator.java | 17 +++++----- .../mapreduce/v2/api/MRClientProtocol.java | 7 ++++ .../client/MRClientProtocolPBClientImpl.java | 5 +++ .../hadoop/mapreduce/v2/TestRPCFactories.java | 5 +++ .../org/apache/hadoop/mapreduce/Cluster.java | 19 ++--------- .../mapreduce/v2/hs/HistoryClientService.java | 7 ++-- .../hadoop/mapred/ClientServiceDelegate.java | 20 +++++------ .../apache/hadoop/mapred/NotRunningJob.java | 7 ++++ .../hadoop/mapred/ResourceMgrDelegate.java | 24 +++++++------- .../org/apache/hadoop/mapred/YARNRunner.java | 10 +++--- .../hadoop/mapred/TestClientRedirect.java | 5 +++ .../mapred/TestClientServiceDelegate.java | 13 ++++---- .../mapreduce/security/TestJHSSecurity.java | 9 ++--- .../TestUmbilicalProtocolWithJobToken.java | 7 ++-- .../yarn/api/records/ContainerToken.java | 2 +- .../apache/hadoop/yarn/util/ProtoUtils.java | 25 ++++++++++++++ .../apache/hadoop/yarn/util/BuilderUtils.java | 7 ++-- .../resourcemanager/ClientRMService.java | 3 +- .../amlauncher/AMLauncher.java | 33 +++++++++---------- .../server/TestContainerManagerSecurity.java | 1 - 24 files changed, 147 insertions(+), 108 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 7c1c8f09fec..74cbb3b800f 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -321,6 +321,8 @@ Release 0.23.3 - UNRELEASED MAPREDUCE-4210. Expose listener address for WebApp (Daryn Sharp via bobby) + MAPREDUCE-4162. 
Correctly set token service (Daryn Sharp via bobby) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java index 01b29eaf178..2e8defbb549 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java @@ -50,7 +50,9 @@ import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier; import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.source.JvmMetrics; +import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.Credentials; +import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; @@ -77,7 +79,8 @@ class YarnChild { String host = args[0]; int port = Integer.parseInt(args[1]); - final InetSocketAddress address = new InetSocketAddress(host, port); + final InetSocketAddress address = + NetUtils.createSocketAddrForHost(host, port); final TaskAttemptID firstTaskid = TaskAttemptID.forName(args[2]); int jvmIdInt = Integer.parseInt(args[3]); JVMId jvmId = new JVMId(firstTaskid.getJobID(), @@ -214,8 +217,7 @@ class YarnChild { LOG.debug("loading token. # keys =" +credentials.numberOfSecretKeys() + "; from file=" + jobTokenFile); Token jt = TokenCache.getJobToken(credentials); - jt.setService(new Text(address.getAddress().getHostAddress() + ":" - + address.getPort())); + SecurityUtil.setTokenService(jt, address); UserGroupInformation current = UserGroupInformation.getCurrentUser(); current.addToken(jt); for (Token tok : credentials.getAllTokens()) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java index 0bd730b7dd0..341e7215293 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java @@ -180,6 +180,11 @@ public class MRClientService extends AbstractService private RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); + @Override + public InetSocketAddress getConnectAddress() { + return getBindAddress(); + } + private Job verifyAndGetJob(JobId jobID, boolean modifyAccess) throws YarnRemoteException { Job job = appContext.getJob(jobID); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java index 46a6111d610..44dd16daa05 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java @@ -19,6 +19,7 @@ package org.apache.hadoop.mapreduce.v2.app.launcher; import java.io.IOException; +import java.net.InetSocketAddress; import java.nio.ByteBuffer; import java.security.PrivilegedAction; import java.util.HashSet; @@ -34,7 +35,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.io.Text; import org.apache.hadoop.mapred.ShuffleHandler; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; @@ -58,6 +58,7 @@ import org.apache.hadoop.yarn.api.records.ContainerToken; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; import org.apache.hadoop.yarn.service.AbstractService; +import org.apache.hadoop.yarn.util.ProtoUtils; import org.apache.hadoop.yarn.util.Records; import com.google.common.util.concurrent.ThreadFactoryBuilder; @@ -321,13 +322,13 @@ public class ContainerLauncherImpl extends AbstractService implements final String containerManagerBindAddr, ContainerToken containerToken) throws IOException { + final InetSocketAddress cmAddr = + NetUtils.createSocketAddr(containerManagerBindAddr); UserGroupInformation user = UserGroupInformation.getCurrentUser(); if (UserGroupInformation.isSecurityEnabled()) { - Token token = new Token( - containerToken.getIdentifier().array(), containerToken - .getPassword().array(), new Text(containerToken.getKind()), - new Text(containerToken.getService())); + Token token = + ProtoUtils.convertFromProtoFormat(containerToken, cmAddr); // the user in createRemoteUser in this context has to be ContainerID user = UserGroupInformation.createRemoteUser(containerID.toString()); user.addToken(token); @@ -338,8 +339,7 @@ public class ContainerLauncherImpl extends AbstractService implements @Override public ContainerManager run() { return (ContainerManager) rpc.getProxy(ContainerManager.class, - NetUtils.createSocketAddr(containerManagerBindAddr), - getConfig()); + cmAddr, getConfig()); } }); return proxy; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java index 49df2176ef9..b0471e68ca0 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java @@ -36,6 +36,7 @@ import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.client.ClientService; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils; +import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import 
org.apache.hadoop.security.token.TokenIdentifier; @@ -133,15 +134,14 @@ public abstract class RMCommunicator extends AbstractService { protected void register() { //Register - String host = clientService.getBindAddress().getAddress() - .getCanonicalHostName(); + InetSocketAddress serviceAddr = clientService.getBindAddress(); try { RegisterApplicationMasterRequest request = recordFactory.newRecordInstance(RegisterApplicationMasterRequest.class); request.setApplicationAttemptId(applicationAttemptId); - request.setHost(host); - request.setRpcPort(clientService.getBindAddress().getPort()); - request.setTrackingUrl(host + ":" + clientService.getHttpPort()); + request.setHost(serviceAddr.getHostName()); + request.setRpcPort(serviceAddr.getPort()); + request.setTrackingUrl(serviceAddr.getHostName() + ":" + clientService.getHttpPort()); RegisterApplicationMasterResponse response = scheduler.registerApplicationMaster(request); minContainerCapability = response.getMinimumResourceCapability(); @@ -262,9 +262,6 @@ public abstract class RMCommunicator extends AbstractService { if (UserGroupInformation.isSecurityEnabled()) { String tokenURLEncodedStr = System.getenv().get( ApplicationConstants.APPLICATION_MASTER_TOKEN_ENV_NAME); - if (LOG.isDebugEnabled()) { - LOG.debug("AppMasterToken is " + tokenURLEncodedStr); - } Token token = new Token(); try { @@ -273,6 +270,10 @@ public abstract class RMCommunicator extends AbstractService { throw new YarnException(e); } + SecurityUtil.setTokenService(token, serviceAddr); + if (LOG.isDebugEnabled()) { + LOG.debug("AppMasterToken is " + token); + } currentUser.addToken(token); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRClientProtocol.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRClientProtocol.java index bc590b606a3..08166b96b18 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRClientProtocol.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRClientProtocol.java @@ -18,6 +18,8 @@ package org.apache.hadoop.mapreduce.v2.api; +import java.net.InetSocketAddress; + import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest; @@ -45,6 +47,11 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskResponse; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; public interface MRClientProtocol { + /** + * Address to which the client is connected + * @return InetSocketAddress + */ + public InetSocketAddress getConnectAddress(); public GetJobReportResponse getJobReport(GetJobReportRequest request) throws YarnRemoteException; public GetTaskReportResponse getTaskReport(GetTaskReportRequest request) throws YarnRemoteException; public GetTaskAttemptReportResponse getTaskAttemptReport(GetTaskAttemptReportRequest request) throws YarnRemoteException; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java index cf14532902c..3ab3f0c3b8b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java @@ -104,6 +104,11 @@ public class MRClientProtocolPBClientImpl implements MRClientProtocol { MRClientProtocolPB.class, clientVersion, addr, conf); } + @Override + public InetSocketAddress getConnectAddress() { + return RPC.getServerAddress(proxy); + } + @Override public GetJobReportResponse getJobReport(GetJobReportRequest request) throws YarnRemoteException { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/TestRPCFactories.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/TestRPCFactories.java index 9401f4b585a..c76328d5056 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/TestRPCFactories.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/TestRPCFactories.java @@ -122,6 +122,11 @@ public class TestRPCFactories { public class MRClientProtocolTestImpl implements MRClientProtocol { + @Override + public InetSocketAddress getConnectAddress() { + return null; + } + @Override public GetJobReportResponse getJobReport(GetJobReportRequest request) throws YarnRemoteException { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java index eb838fe8a7a..e456a7afa88 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java @@ -35,13 +35,11 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.Master; import org.apache.hadoop.mapreduce.protocol.ClientProtocol; import org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider; import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.mapreduce.util.ConfigUtil; import org.apache.hadoop.mapreduce.v2.LogParams; -import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager.InvalidToken; @@ -388,21 +386,8 @@ public class Cluster { */ public Token getDelegationToken(Text renewer) throws IOException, InterruptedException{ - Token result = - client.getDelegationToken(renewer); - - if (result == null) { - return result; - } - - InetSocketAddress addr = 
Master.getMasterAddress(conf); - StringBuilder service = new StringBuilder(); - service.append(NetUtils.normalizeHostName(addr.getAddress(). - getHostAddress())); - service.append(':'); - service.append(addr.getPort()); - result.setService(new Text(service.toString())); - return result; + // client has already set the service + return client.getDelegationToken(renewer); } /** diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java index b9ecb98a8a6..388356f01ab 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java @@ -178,6 +178,10 @@ public class HistoryClientService extends AbstractService { private RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); + public InetSocketAddress getConnectAddress() { + return getBindAddress(); + } + private Job verifyAndGetJob(final JobId jobID) throws YarnRemoteException { UserGroupInformation loginUgi = null; Job job = null; @@ -335,8 +339,7 @@ public class HistoryClientService extends AbstractService { jhsDTSecretManager); DelegationToken mrDToken = BuilderUtils.newDelegationToken( realJHSToken.getIdentifier(), realJHSToken.getKind().toString(), - realJHSToken.getPassword(), bindAddress.getAddress().getHostAddress() - + ":" + bindAddress.getPort()); + realJHSToken.getPassword(), realJHSToken.getService().toString()); response.setDelegationToken(mrDToken); return response; } catch (IOException i) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java index c2a373750cc..0143cb73913 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java @@ -32,7 +32,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.JobID; import org.apache.hadoop.mapreduce.JobStatus; import org.apache.hadoop.mapreduce.MRJobConfig; @@ -63,6 +62,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.JobState; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport; import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.yarn.YarnException; @@ -144,7 +144,7 @@ public class ClientServiceDelegate { if (application != null) { trackingUrl = application.getTrackingUrl(); } - String serviceAddr = null; + 
InetSocketAddress serviceAddr = null; while (application == null || YarnApplicationState.RUNNING == application .getYarnApplicationState()) { @@ -172,25 +172,23 @@ public class ClientServiceDelegate { if(!conf.getBoolean(MRJobConfig.JOB_AM_ACCESS_DISABLED, false)) { UserGroupInformation newUgi = UserGroupInformation.createRemoteUser( UserGroupInformation.getCurrentUser().getUserName()); - serviceAddr = application.getHost() + ":" + application.getRpcPort(); + serviceAddr = NetUtils.createSocketAddrForHost( + application.getHost(), application.getRpcPort()); if (UserGroupInformation.isSecurityEnabled()) { String clientTokenEncoded = application.getClientToken(); Token clientToken = new Token(); clientToken.decodeFromUrlString(clientTokenEncoded); // RPC layer client expects ip:port as service for tokens - InetSocketAddress addr = NetUtils.createSocketAddr(application - .getHost(), application.getRpcPort()); - clientToken.setService(new Text(addr.getAddress().getHostAddress() - + ":" + addr.getPort())); + SecurityUtil.setTokenService(clientToken, serviceAddr); newUgi.addToken(clientToken); } LOG.debug("Connecting to " + serviceAddr); - final String tempStr = serviceAddr; + final InetSocketAddress finalServiceAddr = serviceAddr; realProxy = newUgi.doAs(new PrivilegedExceptionAction() { @Override public MRClientProtocol run() throws IOException { - return instantiateAMProxy(tempStr); + return instantiateAMProxy(finalServiceAddr); } }); } else { @@ -270,13 +268,13 @@ public class ClientServiceDelegate { return historyServerProxy; } - MRClientProtocol instantiateAMProxy(final String serviceAddr) + MRClientProtocol instantiateAMProxy(final InetSocketAddress serviceAddr) throws IOException { LOG.trace("Connecting to ApplicationMaster at: " + serviceAddr); YarnRPC rpc = YarnRPC.create(conf); MRClientProtocol proxy = (MRClientProtocol) rpc.getProxy(MRClientProtocol.class, - NetUtils.createSocketAddr(serviceAddr), conf); + serviceAddr, conf); LOG.trace("Connected to ApplicationMaster at: " + serviceAddr); return proxy; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java index 25069cccf1e..3d00e8af8c9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java @@ -18,6 +18,7 @@ package org.apache.hadoop.mapred; +import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.HashMap; @@ -209,4 +210,10 @@ public class NotRunningJob implements MRClientProtocol { /* Should not be invoked by anyone. */ throw new NotImplementedException(); } + + @Override + public InetSocketAddress getConnectAddress() { + /* Should not be invoked by anyone. 
Normally used to set token service */ + throw new NotImplementedException(); + } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java index 79a1d27c2db..62b608aca47 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java @@ -37,8 +37,6 @@ import org.apache.hadoop.mapreduce.QueueInfo; import org.apache.hadoop.mapreduce.TaskTrackerInfo; import org.apache.hadoop.mapreduce.TypeConverter; import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier; -import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest; -import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse; import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; @@ -67,14 +65,14 @@ import org.apache.hadoop.yarn.exceptions.YarnRemoteException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.ipc.YarnRPC; -import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; +import org.apache.hadoop.yarn.util.ProtoUtils; // TODO: This should be part of something like yarn-client. public class ResourceMgrDelegate { private static final Log LOG = LogFactory.getLog(ResourceMgrDelegate.class); - private final String rmAddress; + private final InetSocketAddress rmAddress; private YarnConfiguration conf; ClientRMProtocol applicationsManager; private ApplicationId applicationId; @@ -87,11 +85,7 @@ public class ResourceMgrDelegate { public ResourceMgrDelegate(YarnConfiguration conf) { this.conf = conf; YarnRPC rpc = YarnRPC.create(this.conf); - InetSocketAddress rmAddress = conf.getSocketAddr( - YarnConfiguration.RM_ADDRESS, - YarnConfiguration.DEFAULT_RM_ADDRESS, - YarnConfiguration.DEFAULT_RM_PORT); - this.rmAddress = rmAddress.toString(); + this.rmAddress = getRmAddress(conf); LOG.debug("Connecting to ResourceManager at " + rmAddress); applicationsManager = (ClientRMProtocol) rpc.getProxy(ClientRMProtocol.class, @@ -109,7 +103,13 @@ public class ResourceMgrDelegate { ClientRMProtocol applicationsManager) { this.conf = conf; this.applicationsManager = applicationsManager; - this.rmAddress = applicationsManager.toString(); + this.rmAddress = getRmAddress(conf); + } + + private static InetSocketAddress getRmAddress(YarnConfiguration conf) { + return conf.getSocketAddr(YarnConfiguration.RM_ADDRESS, + YarnConfiguration.DEFAULT_RM_ADDRESS, + YarnConfiguration.DEFAULT_RM_PORT); } public void cancelDelegationToken(Token arg0) @@ -168,9 +168,7 @@ public class ResourceMgrDelegate { org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse response = applicationsManager.getDelegationToken(rmDTRequest); DelegationToken yarnToken = response.getRMDelegationToken(); - return new Token(yarnToken.getIdentifier().array(), - yarnToken.getPassword().array(), - new Text(yarnToken.getKind()), new Text(yarnToken.getService())); + return 
ProtoUtils.convertFromProtoFormat(yarnToken, rmAddress); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java index 3b00ddf83c5..e6358de35de 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java @@ -56,7 +56,6 @@ import org.apache.hadoop.mapreduce.protocol.ClientProtocol; import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.mapreduce.v2.LogParams; import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol; -import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest; import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils; import org.apache.hadoop.mapreduce.v2.util.MRApps; @@ -84,6 +83,7 @@ import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.util.BuilderUtils; import org.apache.hadoop.yarn.util.ConverterUtils; +import org.apache.hadoop.yarn.util.ProtoUtils; /** @@ -184,7 +184,7 @@ public class YARNRunner implements ClientProtocol { return resMgrDelegate.getClusterMetrics(); } - private Token getDelegationTokenFromHS( + private Token getDelegationTokenFromHS( MRClientProtocol hsProxy, Text renewer) throws IOException, InterruptedException { GetDelegationTokenRequest request = recordFactory @@ -192,10 +192,8 @@ public class YARNRunner implements ClientProtocol { request.setRenewer(renewer.toString()); DelegationToken mrDelegationToken = hsProxy.getDelegationToken(request) .getDelegationToken(); - return new Token(mrDelegationToken - .getIdentifier().array(), mrDelegationToken.getPassword().array(), - new Text(mrDelegationToken.getKind()), new Text( - mrDelegationToken.getService())); + return ProtoUtils.convertFromProtoFormat(mrDelegationToken, + hsProxy.getConnectAddress()); } @Override diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java index 9d4efe639c7..095d3fd9301 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java @@ -368,6 +368,11 @@ public class TestClientRedirect { this(AMHOSTADDRESS); } + @Override + public InetSocketAddress getConnectAddress() { + return bindAddress; + } + public AMService(String hostAddress) { super("AMService"); this.protocol = MRClientProtocol.class; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java index 55cfeeb9442..a3940054c59 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java @@ -27,6 +27,7 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.io.IOException; +import java.net.InetSocketAddress; import java.util.Arrays; import java.util.Collection; @@ -242,7 +243,7 @@ public class TestClientServiceDelegate { // should use the same proxy to AM2 and so instantiateProxy shouldn't be // called. doReturn(firstGenAMProxy).doReturn(secondGenAMProxy).when( - clientServiceDelegate).instantiateAMProxy(any(String.class)); + clientServiceDelegate).instantiateAMProxy(any(InetSocketAddress.class)); JobStatus jobStatus = clientServiceDelegate.getJobStatus(oldJobId); Assert.assertNotNull(jobStatus); @@ -257,7 +258,7 @@ public class TestClientServiceDelegate { Assert.assertEquals("jobName-secondGen", jobStatus.getJobName()); verify(clientServiceDelegate, times(2)).instantiateAMProxy( - any(String.class)); + any(InetSocketAddress.class)); } @Test @@ -286,19 +287,19 @@ public class TestClientServiceDelegate { Assert.assertEquals("N/A", jobStatus.getJobName()); verify(clientServiceDelegate, times(0)).instantiateAMProxy( - any(String.class)); + any(InetSocketAddress.class)); // Should not reach AM even for second and third times too. jobStatus = clientServiceDelegate.getJobStatus(oldJobId); Assert.assertNotNull(jobStatus); Assert.assertEquals("N/A", jobStatus.getJobName()); verify(clientServiceDelegate, times(0)).instantiateAMProxy( - any(String.class)); + any(InetSocketAddress.class)); jobStatus = clientServiceDelegate.getJobStatus(oldJobId); Assert.assertNotNull(jobStatus); Assert.assertEquals("N/A", jobStatus.getJobName()); verify(clientServiceDelegate, times(0)).instantiateAMProxy( - any(String.class)); + any(InetSocketAddress.class)); // The third time around, app is completed, so should go to JHS JobStatus jobStatus1 = clientServiceDelegate.getJobStatus(oldJobId); @@ -309,7 +310,7 @@ public class TestClientServiceDelegate { Assert.assertEquals(1.0f, jobStatus1.getReduceProgress()); verify(clientServiceDelegate, times(0)).instantiateAMProxy( - any(String.class)); + any(InetSocketAddress.class)); } @Test diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java index 20c00b1da08..792806b624c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java @@ -26,11 +26,9 @@ import junit.framework.Assert; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.io.Text; import org.apache.hadoop.mapred.JobConf; import 
org.apache.hadoop.mapreduce.v2.api.HSClientProtocol; import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol; -import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest; import org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer; @@ -38,11 +36,11 @@ import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig; import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; -import org.apache.hadoop.security.token.Token; import org.apache.hadoop.yarn.api.records.DelegationToken; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; import org.apache.hadoop.yarn.ipc.YarnRPC; +import org.apache.hadoop.yarn.util.ProtoUtils; import org.apache.hadoop.yarn.util.Records; import org.apache.log4j.Level; import org.apache.log4j.LogManager; @@ -95,9 +93,8 @@ public class TestJHSSecurity { // Now try talking to JHS using the delegation token UserGroupInformation ugi = UserGroupInformation.createRemoteUser("TheDarkLord"); - ugi.addToken(new Token(token.getIdentifier() - .array(), token.getPassword().array(), new Text(token.getKind()), - new Text(token.getService()))); + ugi.addToken(ProtoUtils.convertFromProtoFormat( + token, jobHistoryServer.getClientService().getBindAddress())); final YarnRPC rpc = YarnRPC.create(conf); MRClientProtocol userUsingDT = ugi.doAs(new PrivilegedAction() { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java index dd4b3489750..8167102ab86 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java @@ -47,6 +47,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.SaslInputStream; import org.apache.hadoop.security.SaslRpcClient; import org.apache.hadoop.security.SaslRpcServer; +import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.log4j.Level; @@ -98,10 +99,8 @@ public class TestUmbilicalProtocolWithJobToken { JobTokenIdentifier tokenId = new JobTokenIdentifier(new Text(jobId)); Token token = new Token(tokenId, sm); sm.addTokenForJob(jobId, token); - Text host = new Text(addr.getAddress().getHostAddress() + ":" - + addr.getPort()); - token.setService(host); - LOG.info("Service IP address for token is " + host); + SecurityUtil.setTokenService(token, addr); + LOG.info("Service address for token is " + token.getService()); current.addToken(token); current.doAs(new PrivilegedExceptionAction() { @Override diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerToken.java 
b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerToken.java index da34f71f927..0e0e8edb16f 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerToken.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerToken.java @@ -43,7 +43,7 @@ import org.apache.hadoop.yarn.api.ContainerManager; */ @Public @Stable -public interface ContainerToken { +public interface ContainerToken extends DelegationToken { /** * Get the token identifier. * @return token identifier diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/ProtoUtils.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/ProtoUtils.java index 6bdc1dfcd0e..5a73eabce1d 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/ProtoUtils.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/ProtoUtils.java @@ -18,11 +18,17 @@ package org.apache.hadoop.yarn.util; +import java.net.InetSocketAddress; import java.nio.ByteBuffer; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; import org.apache.hadoop.yarn.api.records.ContainerState; +import org.apache.hadoop.yarn.api.records.DelegationToken; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; @@ -192,4 +198,23 @@ public class ProtoUtils { return ApplicationAccessType.valueOf(e.name().replace( APP_ACCESS_TYPE_PREFIX, "")); } + + /** + * Convert a protobuf token into a rpc token and set its service + * + * @param protoToken the yarn token + * @param serviceAddr the connect address for the service + * @return rpc token + */ + public static Token + convertFromProtoFormat(DelegationToken protoToken, InetSocketAddress serviceAddr) { + Token token = new Token(protoToken.getIdentifier().array(), + protoToken.getPassword().array(), + new Text(protoToken.getKind()), + new Text(protoToken.getService())); + if (serviceAddr != null) { + SecurityUtil.setTokenService(token, serviceAddr); + } + return token; + } } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java index 413817ac586..9fadd09fc16 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java @@ -30,6 +30,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; 
import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -275,10 +276,10 @@ public class BuilderUtils { containerToken.setKind(ContainerTokenIdentifier.KIND.toString()); containerToken.setPassword(password); // RPC layer client expects ip:port as service for tokens - InetSocketAddress addr = NetUtils.createSocketAddr(nodeId.getHost(), + InetSocketAddress addr = NetUtils.createSocketAddrForHost(nodeId.getHost(), nodeId.getPort()); - containerToken.setService(addr.getAddress().getHostAddress() + ":" - + addr.getPort()); + // NOTE: use SecurityUtil.setTokenService if this becomes a "real" token + containerToken.setService(SecurityUtil.buildTokenService(addr).toString()); return containerToken; } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java index 8a007d536c8..b4be6fcf677 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java @@ -464,8 +464,7 @@ public class ClientRMService extends AbstractService implements realRMDTtoken.getIdentifier(), realRMDTtoken.getKind().toString(), realRMDTtoken.getPassword(), - clientBindAddress.getAddress().getHostAddress() + ":" - + clientBindAddress.getPort() + realRMDTtoken.getService().toString() )); return response; } catch(IOException io) { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java index 114dc977b59..aa9d2c245da 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java @@ -32,9 +32,9 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.DataInputByteBuffer; import org.apache.hadoop.io.DataOutputBuffer; -import org.apache.hadoop.io.Text; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.Credentials; +import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.StringUtils; @@ -46,7 +46,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; -import org.apache.hadoop.yarn.api.records.ContainerToken; +import 
org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.factories.RecordFactory; @@ -61,6 +61,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptLaunchFailedEvent; +import org.apache.hadoop.yarn.util.ProtoUtils; /** * The launch of the AM itself. @@ -131,27 +132,25 @@ public class AMLauncher implements Runnable { Container container = application.getMasterContainer(); - final String containerManagerBindAddress = container.getNodeId().toString(); + final NodeId node = container.getNodeId(); + final InetSocketAddress containerManagerBindAddress = + NetUtils.createSocketAddrForHost(node.getHost(), node.getPort()); final YarnRPC rpc = YarnRPC.create(conf); // TODO: Don't create again and again. UserGroupInformation currentUser = UserGroupInformation .createRemoteUser(containerId.toString()); if (UserGroupInformation.isSecurityEnabled()) { - ContainerToken containerToken = container.getContainerToken(); Token token = - new Token( - containerToken.getIdentifier().array(), - containerToken.getPassword().array(), new Text( - containerToken.getKind()), new Text( - containerToken.getService())); + ProtoUtils.convertFromProtoFormat(container.getContainerToken(), + containerManagerBindAddress); currentUser.addToken(token); } return currentUser.doAs(new PrivilegedAction() { @Override public ContainerManager run() { return (ContainerManager) rpc.getProxy(ContainerManager.class, - NetUtils.createSocketAddr(containerManagerBindAddress), conf); + containerManagerBindAddress, conf); } }); } @@ -218,22 +217,21 @@ public class AMLauncher implements Runnable { Token token = new Token(id, this.rmContext.getApplicationTokenSecretManager()); - InetSocketAddress unresolvedAddr = conf.getSocketAddr( + InetSocketAddress serviceAddr = conf.getSocketAddr( YarnConfiguration.RM_SCHEDULER_ADDRESS, YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS, YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT); - String resolvedAddr = - unresolvedAddr.getAddress().getHostAddress() + ":" - + unresolvedAddr.getPort(); - token.setService(new Text(resolvedAddr)); + // normally the client should set the service after acquiring the token, + // but this token is directly provided to the tasks + SecurityUtil.setTokenService(token, serviceAddr); String appMasterTokenEncoded = token.encodeToUrlString(); - LOG.debug("Putting appMaster token in env : " + appMasterTokenEncoded); + LOG.debug("Putting appMaster token in env : " + token); environment.put( ApplicationConstants.APPLICATION_MASTER_TOKEN_ENV_NAME, appMasterTokenEncoded); // Add the RM token - credentials.addToken(new Text(resolvedAddr), token); + credentials.addToken(token.getService(), token); DataOutputBuffer dob = new DataOutputBuffer(); credentials.writeTokenStorageToStream(dob); container.setContainerTokens( @@ -245,7 +243,6 @@ public class AMLauncher implements Runnable { this.clientToAMSecretManager.getMasterKey(identifier); String encoded = Base64.encodeBase64URLSafeString(clientSecretKey.getEncoded()); - LOG.debug("The encoded client secret-key to be put in env : " + encoded); environment.put( ApplicationConstants.APPLICATION_CLIENT_SECRET_ENV_NAME, encoded); diff --git 
a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java index cf5629fc50f..4594c05ca36 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java @@ -401,7 +401,6 @@ public class TestContainerManagerSecurity { appTokenSecretManager); SecurityUtil.setTokenService(appToken, schedulerAddr); currentUser.addToken(appToken); - SecurityUtil.setTokenService(appToken, schedulerAddr); AMRMProtocol scheduler = currentUser .doAs(new PrivilegedAction() { From 67a514b64775f14fa3f40e6bdc6b6f69e64816aa Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Tue, 8 May 2012 15:25:08 +0000 Subject: [PATCH 34/70] MAPREDUCE-3850. Avoid redundant calls for tokens in TokenCache (Daryn Sharp via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1335585 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 +++ .../hadoop/mapreduce/security/TokenCache.java | 7 ++++++- .../mapreduce/security/TestTokenCache.java | 20 +++++++++++++++++++ 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 74cbb3b800f..868eca2cc36 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -325,6 +325,9 @@ Release 0.23.3 - UNRELEASED OPTIMIZATIONS + MAPREDUCE-3850. Avoid redundant calls for tokens in TokenCache (Daryn + Sharp via bobby) + BUG FIXES MAPREDUCE-4092. 
commitJob Exception does not fail job (Jon Eagles via diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java index ef25939ebdb..9e8c1909a13 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java @@ -19,7 +19,9 @@ package org.apache.hadoop.mapreduce.security; import java.io.IOException; +import java.util.HashSet; import java.util.List; +import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -92,8 +94,11 @@ public class TokenCache { static void obtainTokensForNamenodesInternal(Credentials credentials, Path[] ps, Configuration conf) throws IOException { + Set fsSet = new HashSet(); for(Path p: ps) { - FileSystem fs = FileSystem.get(p.toUri(), conf); + fsSet.add(p.getFileSystem(conf)); + } + for (FileSystem fs : fsSet) { obtainTokensForNamenodesInternal(fs, credentials, conf); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java index b0e9350dbed..1ae2ecde1e2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java @@ -251,6 +251,26 @@ public class TestTokenCache { return mockFs; } + @Test + public void testSingleTokenFetch() throws Exception { + Configuration conf = new Configuration(); + conf.set(YarnConfiguration.RM_PRINCIPAL, "mapred/host@REALM"); + String renewer = Master.getMasterPrincipal(conf); + Credentials credentials = new Credentials(); + + FileSystem mockFs = mock(FileSystem.class); + when(mockFs.getCanonicalServiceName()).thenReturn("host:0"); + when(mockFs.getUri()).thenReturn(new URI("mockfs://host:0")); + + Path mockPath = mock(Path.class); + when(mockPath.getFileSystem(conf)).thenReturn(mockFs); + + Path[] paths = new Path[]{ mockPath, mockPath }; + when(mockFs.getDelegationTokens("me", credentials)).thenReturn(null); + TokenCache.obtainTokensForNamenodesInternal(credentials, paths, conf); + verify(mockFs, times(1)).getDelegationTokens(renewer, credentials); + } + @Test public void testCleanUpTokenReferral() throws Exception { Configuration conf = new Configuration(); From 0caac704fb07f7c10916ccef58f469104ab79fd3 Mon Sep 17 00:00:00 2001 From: Thomas Graves Date: Tue, 8 May 2012 17:13:33 +0000 Subject: [PATCH 35/70] MAPREDUCE-4215. 
RM app page shows 500 error on appid parse error (Jonathon Eagles via tgraves) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1335647 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 +++ .../yarn/server/resourcemanager/webapp/AppBlock.java | 12 ++++++++++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 868eca2cc36..af4ea9aeb8f 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -469,6 +469,9 @@ Release 0.23.3 - UNRELEASED MAPREDUCE-4226. ConcurrentModificationException in FileSystemCounterGroup. (tomwhite) + MAPREDUCE-4215. RM app page shows 500 error on appid parse error + (Jonathon Eagles via tgraves) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java index 9927b3221e9..54ac79bc887 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java @@ -55,7 +55,15 @@ public class AppBlock extends HtmlBlock { puts("Bad request: requires application ID"); return; } - ApplicationId appID = Apps.toAppID(aid); + + ApplicationId appID = null; + try { + appID = Apps.toAppID(aid); + } catch (Exception e) { + puts("Invalid Application ID: " + aid); + return; + } + RMContext context = getInstance(RMContext.class); RMApp rmApp = context.getRMApps().get(appID); if (rmApp == null) { @@ -74,7 +82,7 @@ public class AppBlock extends HtmlBlock { && !this.aclsManager.checkAccess(callerUGI, ApplicationAccessType.VIEW_APP, app.getUser(), appID)) { puts("You (User " + remoteUser - + ") are not authorized to view the logs for application " + appID); + + ") are not authorized to view application " + appID); return; } From f6d621834889ac7e914242305bd2bfc12a7e9a78 Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Tue, 8 May 2012 17:53:15 +0000 Subject: [PATCH 36/70] MAPREDUCE-4231. Update RAID to use the new BlockCollection interface. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1335661 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 +++ .../blockmanagement/BlockPlacementPolicyRaid.java | 12 ++++++------ .../TestBlockPlacementPolicyRaid.java | 11 +++++------ .../hdfs/server/namenode/NameNodeRaidTestUtil.java | 7 ++++--- 4 files changed, 18 insertions(+), 15 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index af4ea9aeb8f..dd6f4d7e7d5 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -284,6 +284,9 @@ Release 2.0.0 - UNRELEASED MAPREDUCE-3958. RM: Remove RMNodeState and replace it with NodeState (Bikas Saha via bobby) + MAPREDUCE-4231. Update RAID to use the new BlockCollection interface. 
+ (szetszwo) + Release 0.23.3 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRaid.java b/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRaid.java index aea009e3e70..77977118f2a 100644 --- a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRaid.java +++ b/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRaid.java @@ -144,7 +144,7 @@ public class BlockPlacementPolicyRaid extends BlockPlacementPolicy { /** {@inheritDoc} */ @Override - public DatanodeDescriptor chooseReplicaToDelete(FSInodeInfo inode, + public DatanodeDescriptor chooseReplicaToDelete(BlockCollection inode, Block block, short replicationFactor, Collection first, Collection second) { @@ -425,7 +425,7 @@ public class BlockPlacementPolicyRaid extends BlockPlacementPolicy { } /** - * Cache results for FSInodeInfo.getFullPathName() + * Cache results for getFullPathName() */ static class CachedFullPathNames { FSNamesystem namesystem; @@ -446,8 +446,8 @@ public class BlockPlacementPolicyRaid extends BlockPlacementPolicy { }; static private class INodeWithHashCode { - FSInodeInfo inode; - INodeWithHashCode(FSInodeInfo inode) { + BlockCollection inode; + INodeWithHashCode(BlockCollection inode) { this.inode = inode; } @Override @@ -459,11 +459,11 @@ public class BlockPlacementPolicyRaid extends BlockPlacementPolicy { return System.identityHashCode(inode); } String getFullPathName() { - return inode.getFullPathName(); + return inode.getName(); } } - public String get(FSInodeInfo inode) throws IOException { + public String get(BlockCollection inode) throws IOException { return cacheInternal.get(new INodeWithHashCode(inode)); } } diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementPolicyRaid.java b/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementPolicyRaid.java index dd9d012493c..19d81434647 100644 --- a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementPolicyRaid.java +++ b/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementPolicyRaid.java @@ -41,7 +41,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRaid.CachedFullPathNames; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRaid.CachedLocatedBlocks; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRaid.FileType; -import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.server.namenode.NameNodeRaidTestUtil; @@ -241,7 +240,7 @@ public class TestBlockPlacementPolicyRaid { // test full path cache CachedFullPathNames cachedFullPathNames = new CachedFullPathNames(namesystem); - final FSInodeInfo[] inodes = NameNodeRaidTestUtil.getFSInodeInfo( + final BlockCollection[] inodes = NameNodeRaidTestUtil.getBlockCollections( namesystem, file1, file2); verifyCachedFullPathNameResult(cachedFullPathNames, inodes[0]); @@ -477,14 +476,14 @@ public 
class TestBlockPlacementPolicyRaid { } private void verifyCachedFullPathNameResult( - CachedFullPathNames cachedFullPathNames, FSInodeInfo inode) + CachedFullPathNames cachedFullPathNames, BlockCollection inode) throws IOException { - String res1 = inode.getFullPathName(); + String res1 = inode.getName(); String res2 = cachedFullPathNames.get(inode); LOG.info("Actual path name: " + res1); LOG.info("Cached path name: " + res2); Assert.assertEquals(cachedFullPathNames.get(inode), - inode.getFullPathName()); + inode.getName()); } private void verifyCachedBlocksResult(CachedLocatedBlocks cachedBlocks, @@ -503,7 +502,7 @@ public class TestBlockPlacementPolicyRaid { private Collection getCompanionBlocks( FSNamesystem namesystem, BlockPlacementPolicyRaid policy, ExtendedBlock block) throws IOException { - INodeFile inode = blockManager.blocksMap.getINode(block + INodeFile inode = (INodeFile)blockManager.blocksMap.getINode(block .getLocalBlock()); FileType type = policy.getFileType(inode.getFullPathName()); return policy.getCompanionBlocks(inode.getFullPathName(), type, diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidTestUtil.java b/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidTestUtil.java index 41960f8a175..967df15632c 100644 --- a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidTestUtil.java +++ b/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidTestUtil.java @@ -18,16 +18,17 @@ package org.apache.hadoop.hdfs.server.namenode; import org.apache.hadoop.fs.UnresolvedLinkException; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection; public class NameNodeRaidTestUtil { - public static FSInodeInfo[] getFSInodeInfo(final FSNamesystem namesystem, + public static BlockCollection[] getBlockCollections(final FSNamesystem namesystem, final String... files) throws UnresolvedLinkException { - final FSInodeInfo[] inodes = new FSInodeInfo[files.length]; + final BlockCollection[] inodes = new BlockCollection[files.length]; final FSDirectory dir = namesystem.dir; dir.readLock(); try { for(int i = 0; i < files.length; i++) { - inodes[i] = dir.rootDir.getNode(files[i], true); + inodes[i] = (BlockCollection)dir.rootDir.getNode(files[i], true); } return inodes; } finally { From 408e558d8d44c07973054229bff422a6c327abf0 Mon Sep 17 00:00:00 2001 From: Uma Maheswara Rao G Date: Tue, 8 May 2012 19:45:38 +0000 Subject: [PATCH 37/70] HDFS-3157. Error in deleting block is keep on coming from DN even after the block report and directory scanning has happened. Contributed by Ashish Singhi. 
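The regression test added by this change waits a fixed three seconds for the corrupt replica to be invalidated and the block to be re-replicated, which ties the test to daemon timing. A hedged, self-contained sketch of a polling helper such a test could use instead (hypothetical names, not part of this patch):

    // Illustrative only: poll until a condition holds or a timeout expires.
    final class TestWaitUtil {
      interface Check {
        boolean ok() throws Exception;
      }
      static void waitFor(Check check, long intervalMs, long timeoutMs)
          throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
          if (check.ok()) {
            return;                 // condition satisfied
          }
          Thread.sleep(intervalMs); // back off before re-checking
        }
        throw new AssertionError("condition not met within " + timeoutMs + " ms");
      }
    }

A caller would supply a Check that returns true once countReplicas(namesystem, blk).liveReplicas() reaches 3, so the wait ends as soon as replication completes rather than after a fixed sleep.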
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1335719 13f79535-47bb-0310-9956-ffa450edef68 --- .../server/blockmanagement/BlockManager.java | 6 +- .../TestRBWBlockInvalidation.java | 104 ++++++++++++++++++ .../server/datanode/DataNodeTestUtils.java | 5 + .../fsdataset/impl/FsDatasetTestUtil.java | 6 + 4 files changed, 119 insertions(+), 2 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index a3c2761eaa6..d552eab42e0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -1802,7 +1802,8 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block case COMPLETE: case COMMITTED: if (storedBlock.getGenerationStamp() != iblk.getGenerationStamp()) { - return new BlockToMarkCorrupt(storedBlock, + return new BlockToMarkCorrupt(new BlockInfo(iblk, storedBlock + .getINode().getReplication()), "block is " + ucState + " and reported genstamp " + iblk.getGenerationStamp() + " does not match " + "genstamp in block map " + storedBlock.getGenerationStamp()); @@ -1822,7 +1823,8 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block if (!storedBlock.isComplete()) { return null; // not corrupt } else if (storedBlock.getGenerationStamp() != iblk.getGenerationStamp()) { - return new BlockToMarkCorrupt(storedBlock, + return new BlockToMarkCorrupt(new BlockInfo(iblk, storedBlock + .getINode().getReplication()), "reported " + reportedState + " replica with genstamp " + iblk.getGenerationStamp() + " does not match COMPLETE block's " + "genstamp in block map " + storedBlock.getGenerationStamp()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java new file mode 100644 index 00000000000..e45dd6a0a62 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java @@ -0,0 +1,104 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.blockmanagement; + +import java.io.File; +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; + +import org.junit.Test; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +/** + * Test when RBW block is removed. Invalidation of the corrupted block happens + * and then the under replicated block gets replicated to the datanode. + */ +public class TestRBWBlockInvalidation { + private static NumberReplicas countReplicas(final FSNamesystem namesystem, + ExtendedBlock block) { + return namesystem.getBlockManager().countNodes(block.getLocalBlock()); + } + + /** + * Test when a block's replica is removed from RBW folder in one of the + * datanode, namenode should ask to invalidate that corrupted block and + * schedule replication for one more replica for that under replicated block. + */ + @Test + public void testBlockInvalidationWhenRBWReplicaMissedInDN() + throws IOException, InterruptedException { + Configuration conf = new HdfsConfiguration(); + conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 300); + conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1); + conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1); + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3) + .build(); + FSDataOutputStream out = null; + try { + final FSNamesystem namesystem = cluster.getNamesystem(); + FileSystem fs = cluster.getFileSystem(); + Path testPath = new Path(MiniDFSCluster.getBaseDirectory(), "foo1"); + out = fs.create(testPath, (short) 3); + out.writeBytes("HDFS-3157: " + testPath); + out.hsync(); + String bpid = namesystem.getBlockPoolId(); + ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, testPath); + Block block = blk.getLocalBlock(); + // Deleting partial block and its meta information from the RBW folder + // of first datanode. + DataNode dn = cluster.getDataNodes().get(0); + File blockFile = DataNodeTestUtils.getBlockFile(dn, bpid, block); + File metaFile = DataNodeTestUtils.getMetaFile(dn, bpid, block); + assertTrue("Could not delete the block file from the RBW folder", + blockFile.delete()); + assertTrue("Could not delete the block meta file from the RBW folder", + metaFile.delete()); + out.close(); + assertEquals("The corrupt replica could not be invalidated", 0, + countReplicas(namesystem, blk).corruptReplicas()); + /* + * Sleep for 3 seconds, for under replicated block to get replicated. As + * one second will be taken by ReplicationMonitor and one more second for + * invalidated block to get deleted from the datanode. 
+ */ + Thread.sleep(3000); + blk = DFSTestUtil.getFirstBlock(fs, testPath); + assertEquals("There should be three live replicas", 3, + countReplicas(namesystem, blk).liveReplicas()); + } finally { + if (out != null) { + out.close(); + } + cluster.shutdown(); + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java index 74be37d986b..7baa47a490e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java @@ -136,6 +136,11 @@ public class DataNodeTestUtils { ) throws IOException { return FsDatasetTestUtil.getBlockFile(dn.getFSDataset(), bpid, b); } + + public static File getMetaFile(DataNode dn, String bpid, Block b) + throws IOException { + return FsDatasetTestUtil.getMetaFile(dn.getFSDataset(), bpid, b); + } public static boolean unlinkBlock(DataNode dn, ExtendedBlock bk, int numLinks ) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java index 211737fa73e..05a2cec906c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java @@ -36,6 +36,12 @@ public class FsDatasetTestUtil { ) throws IOException { return ((FsDatasetImpl)fsd).getBlockFile(bpid, b); } + + public static File getMetaFile(FsDatasetSpi fsd, String bpid, Block b) + throws IOException { + return FsDatasetUtil.getMetaFile(getBlockFile(fsd, bpid, b), b + .getGenerationStamp()); + } public static boolean unlinkBlock(FsDatasetSpi fsd, ExtendedBlock block, int numLinks) throws IOException { From 27ce5a6fb0eb14268b29a513e44c1eb08096f346 Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Tue, 8 May 2012 21:53:11 +0000 Subject: [PATCH 38/70] Remove the empty file FSInodeInfo.java for HDFS-3363. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1335788 13f79535-47bb-0310-9956-ffa450edef68 --- .../java/org/apache/hadoop/hdfs/server/namenode/FSInodeInfo.java | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSInodeInfo.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSInodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSInodeInfo.java deleted file mode 100644 index e69de29bb2d..00000000000 From b1e883132b8b0a9678b37cef4eb1b2e04f0200e7 Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Wed, 9 May 2012 14:08:28 +0000 Subject: [PATCH 39/70] HADOOP-8354. test-patch findbugs may fail if a dependent module is changed Contributed by Tom White and Robert Evans. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1336213 13f79535-47bb-0310-9956-ffa450edef68 --- dev-support/test-patch.sh | 19 +++++++++++++++++++ .../hadoop-common/CHANGES.txt | 3 +++ 2 files changed, 22 insertions(+) diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index ff52bd353de..90757a1d8b5 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -528,6 +528,24 @@ $JIRA_COMMENT_FOOTER" return 0 } +############################################################################### +### Install the new jars so tests and findbugs can find all of the updated jars +buildAndInstall () { + echo "" + echo "" + echo "======================================================================" + echo "======================================================================" + echo " Installing all of the jars" + echo "======================================================================" + echo "======================================================================" + echo "" + echo "" + echo "$MVN install -Dmaven.javadoc.skip=true -DskipTests -D${PROJECT_NAME}PatchProcess" + $MVN install -Dmaven.javadoc.skip=true -DskipTests -D${PROJECT_NAME}PatchProcess + return $? +} + + ############################################################################### ### Check there are no changes in the number of Findbugs warnings checkFindbugsWarnings () { @@ -891,6 +909,7 @@ checkEclipseGeneration ### Checkstyle not implemented yet #checkStyle #(( RESULT = RESULT + $? )) +buildAndInstall checkFindbugsWarnings (( RESULT = RESULT + $? )) checkReleaseAuditWarnings diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 0cc39a58b4a..283158e8500 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -129,6 +129,9 @@ Trunk (unreleased changes) HADOOP-8339. jenkins complaining about 16 javadoc warnings (Tom White and Robert Evans via tgraves) + HADOOP-8354. test-patch findbugs may fail if a dependent module is changed + (Tom White and Robert Evans) + OPTIMIZATIONS HADOOP-7761. Improve the performance of raw comparisons. (todd) From c853a425a04efb1f4c953c9ae123d9cf1e600e10 Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Wed, 9 May 2012 17:06:27 +0000 Subject: [PATCH 40/70] HADOOP-8373. Port RPC.getServerAddress to 0.23 (Daryn Sharp via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1336299 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 2 ++ .../java/org/apache/hadoop/ipc/TestRPC.java | 23 +++++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 283158e8500..c34deefafd7 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -550,6 +550,8 @@ Release 0.23.3 - UNRELEASED HADOOP-8341. Fix or filter findbugs issues in hadoop-tools (bobby) + HADOOP-8373. 
Port RPC.getServerAddress to 0.23 (Daryn Sharp via bobby) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java index 56b2b2487ba..cc0c5c9f54c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java @@ -322,6 +322,29 @@ public class TestRPC { server.stop(); } + @Test + public void testProxyAddress() throws Exception { + Server server = RPC.getServer(TestProtocol.class, + new TestImpl(), ADDRESS, 0, conf); + TestProtocol proxy = null; + + try { + server.start(); + InetSocketAddress addr = NetUtils.getConnectAddress(server); + + // create a client + proxy = (TestProtocol)RPC.getProxy( + TestProtocol.class, TestProtocol.versionID, addr, conf); + + assertEquals(addr, RPC.getServerAddress(proxy)); + } finally { + server.stop(); + if (proxy != null) { + RPC.stopProxy(proxy); + } + } + } + @Test public void testSlowRpc() throws Exception { System.out.println("Testing Slow RPC"); From e3fbc526861cdacbd61ab3d2e0c88a53735e29d9 Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Wed, 9 May 2012 18:16:40 +0000 Subject: [PATCH 41/70] HDFS-3390. DFSAdmin should print full stack traces of errors when DEBUG logging is enabled. Contributed by Aaron T. Myers. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1336324 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../org/apache/hadoop/hdfs/tools/DFSAdmin.java | 16 ++++++++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 449ff1ff5da..a88b273bb6d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -435,6 +435,9 @@ Release 2.0.0 - UNRELEASED so that INodeFile and INodeFileUnderConstruction do not have to be used in block management. (John George via szetszwo) + HDFS-3390. DFSAdmin should print full stack traces of errors when DEBUG + logging is enabled. (atm) + OPTIMIZATIONS HDFS-3024. 
Improve performance of stringification in addStoredBlock (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java index 5b572362411..c7c206fcb73 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java @@ -26,6 +26,8 @@ import java.util.HashMap; import java.util.List; import java.util.TreeSet; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -64,9 +66,11 @@ import org.apache.hadoop.util.ToolRunner; @InterfaceAudience.Private public class DFSAdmin extends FsShell { - static{ + static { HdfsConfiguration.init(); } + + private static final Log LOG = LogFactory.getLog(DFSAdmin.class); /** * An abstract class for the execution of a file system command @@ -1089,6 +1093,7 @@ public class DFSAdmin extends FsShell { return exitCode; } + Exception debugException = null; exitCode = 0; try { if ("-report".equals(cmd)) { @@ -1143,6 +1148,7 @@ public class DFSAdmin extends FsShell { printUsage(""); } } catch (IllegalArgumentException arge) { + debugException = arge; exitCode = -1; System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage()); printUsage(cmd); @@ -1151,6 +1157,7 @@ public class DFSAdmin extends FsShell { // This is a error returned by hadoop server. Print // out the first line of the error message, ignore the stack trace. exitCode = -1; + debugException = e; try { String[] content; content = e.getLocalizedMessage().split("\n"); @@ -1159,12 +1166,17 @@ public class DFSAdmin extends FsShell { } catch (Exception ex) { System.err.println(cmd.substring(1) + ": " + ex.getLocalizedMessage()); + debugException = ex; } } catch (Exception e) { exitCode = -1; + debugException = e; System.err.println(cmd.substring(1) + ": " + e.getLocalizedMessage()); - } + } + if (LOG.isDebugEnabled()) { + LOG.debug("Exception encountered:", debugException); + } return exitCode; } From 0308423b9c8d7a80907f3b84e1d1ce286bd340de Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Wed, 9 May 2012 21:08:53 +0000 Subject: [PATCH 42/70] MAPREDUCE-4237. TestNodeStatusUpdater can fail if localhost has a domain associated with it (bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1336399 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 +++ .../yarn/server/nodemanager/TestNodeStatusUpdater.java | 9 +++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index dd6f4d7e7d5..7b8e045a2d4 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -475,6 +475,9 @@ Release 0.23.3 - UNRELEASED MAPREDUCE-4215. RM app page shows 500 error on appid parse error (Jonathon Eagles via tgraves) + MAPREDUCE-4237. 
TestNodeStatusUpdater can fail if localhost has a domain + associated with it (bobby) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java index 56153379deb..30c54e2dd6d 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java @@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.nodemanager; import static org.mockito.Mockito.mock; import java.io.IOException; +import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -35,6 +36,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Path; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -88,7 +90,7 @@ public class TestNodeStatusUpdater { int heartBeatID = 0; volatile Throwable nmStartError = null; private final List registeredNodes = new ArrayList(); - private final Configuration conf = new YarnConfiguration(); + private final Configuration conf = createNMConfig(); private NodeManager nm; protected NodeManager rebootedNodeManager; @@ -117,7 +119,9 @@ public class TestNodeStatusUpdater { Resource resource = request.getResource(); LOG.info("Registering " + nodeId.toString()); // NOTE: this really should be checking against the config value - Assert.assertEquals("localhost:12345", nodeId.toString()); + InetSocketAddress expected = NetUtils.getConnectAddress( + conf.getSocketAddr(YarnConfiguration.NM_ADDRESS, null, -1)); + Assert.assertEquals(NetUtils.getHostPortString(expected), nodeId.toString()); Assert.assertEquals(5 * 1024, resource.getMemory()); registeredNodes.add(nodeId); RegistrationResponse regResponse = recordFactory @@ -429,6 +433,7 @@ public class TestNodeStatusUpdater { while (nm.getServiceState() == STATE.INITED && waitCount++ != 20) { LOG.info("Waiting for NM to start.."); if (nmStartError != null) { + LOG.error("Error during startup. ", nmStartError); Assert.fail(nmStartError.getCause().getMessage()); } Thread.sleep(1000); From 1ceecf2aa241d161bcd27b8c556aacd0d29c1cb8 Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Wed, 9 May 2012 23:18:15 +0000 Subject: [PATCH 43/70] HADOOP-8372. NetUtils.normalizeHostName() incorrectly handles hostname starting with a numeric character. Contributed by Junping Du. 
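For context on the behaviour being fixed: the previous normalizeHostName() treated any name whose first character is a digit as an IP address and returned it untouched, so a real hostname such as "3w.org" was never resolved. A minimal, self-contained sketch that mirrors the corrected logic (illustrative only, assuming working name resolution; it does not call the patched NetUtils itself):

    import java.net.InetAddress;
    import java.net.UnknownHostException;

    public class NormalizeHostNameDemo {
      // Mirrors the patched logic: always attempt resolution, fall back to the input.
      static String normalize(String name) {
        try {
          return InetAddress.getByName(name).getHostAddress();
        } catch (UnknownHostException e) {
          return name; // irresolvable names are returned unchanged
        }
      }

      public static void main(String[] args) {
        System.out.println(normalize("127.0.0.1"));   // an IP address normalizes to itself
        System.out.println(normalize("localhost"));   // typically resolves to 127.0.0.1
        System.out.println(normalize("3w.org"));      // digit-leading hostname is now resolved
        System.out.println(normalize("UnknownHost")); // returned unchanged if it cannot be resolved
      }
    }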
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1336446 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 +++ .../java/org/apache/hadoop/net/NetUtils.java | 15 ++++-------- .../org/apache/hadoop/net/TestNetUtils.java | 24 +++++++++++++++++-- .../apache/hadoop/net/TestTableMapping.java | 12 +++++----- 4 files changed, 36 insertions(+), 18 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index c34deefafd7..eeb320514f8 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -432,6 +432,9 @@ Release 2.0.0 - UNRELEASED HADOOP-8359. Fix javadoc warnings in Configuration. (Anupam Seth via szetszwo) + HADOOP-8372. NetUtils.normalizeHostName() incorrectly handles hostname + starting with a numeric character. (Junping Du via suresh) + BREAKDOWN OF HADOOP-7454 SUBTASKS HADOOP-7455. HA: Introduce HA Service Protocol Interface. (suresh) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java index 0fe61ad21c5..b3c77c20336 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java @@ -494,7 +494,7 @@ public class NetUtils { * also takes a local address and port to bind the socket to. * * @param socket - * @param address the remote address + * @param endpoint the remote address * @param localAddr the local address to bind the socket to * @param timeout timeout in milliseconds */ @@ -549,16 +549,11 @@ public class NetUtils { * @return its IP address in the string format */ public static String normalizeHostName(String name) { - if (Character.digit(name.charAt(0), 10) != -1) { // it is an IP + try { + return InetAddress.getByName(name).getHostAddress(); + } catch (UnknownHostException e) { return name; - } else { - try { - InetAddress ipAddress = InetAddress.getByName(name); - return ipAddress.getHostAddress(); - } catch (UnknownHostException e) { - return name; - } - } + } } /** diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java index 61ac35c5cc9..e8455862d79 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java @@ -31,7 +31,9 @@ import java.net.SocketException; import java.net.SocketTimeoutException; import java.net.URI; import java.net.UnknownHostException; +import java.util.Arrays; import java.util.Enumeration; +import java.util.List; import java.util.concurrent.TimeUnit; import junit.framework.AssertionFailedError; @@ -42,8 +44,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.NetUtilsTestResolver; -import org.apache.hadoop.test.MultithreadedTestUtil.TestContext; -import org.apache.hadoop.test.MultithreadedTestUtil.TestingThread; import org.junit.Assume; import org.junit.Before; import org.junit.BeforeClass; @@ -599,6 +599,26 @@ public class TestNetUtils { assertEquals("scheme://host.a.b/path", uri.toString()); } + /** + * Test for {@link NetUtils#normalizeHostNames} + 
*/ + @Test + public void testNormalizeHostName() { + List hosts = Arrays.asList(new String[] {"127.0.0.1", + "localhost", "3w.org", "UnknownHost"}); + List normalizedHosts = NetUtils.normalizeHostNames(hosts); + // when ipaddress is normalized, same address is expected in return + assertEquals(normalizedHosts.get(0), hosts.get(0)); + // for normalizing a resolvable hostname, resolved ipaddress is expected in return + assertFalse(normalizedHosts.get(1).equals(hosts.get(1))); + assertEquals(normalizedHosts.get(1), hosts.get(0)); + // this address HADOOP-8372: when normalizing a valid resolvable hostname start with numeric, + // its ipaddress is expected to return + assertFalse(normalizedHosts.get(2).equals(hosts.get(2))); + // return the same hostname after normalizing a irresolvable hostname. + assertEquals(normalizedHosts.get(3), hosts.get(3)); + } + @Test public void testGetHostNameOfIP() { assertNull(NetUtils.getHostNameOfIP(null)); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java index f8b3c33340c..6356555da42 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java @@ -41,7 +41,7 @@ public class TestTableMapping { public void setUp() throws IOException { mappingFile = File.createTempFile(getClass().getSimpleName(), ".txt"); Files.write("a.b.c /rack1\n" + - "1.2.3\t/rack2\n", mappingFile, Charsets.UTF_8); + "1.2.3.4\t/rack2\n", mappingFile, Charsets.UTF_8); mappingFile.deleteOnExit(); } @@ -55,7 +55,7 @@ public class TestTableMapping { List names = new ArrayList(); names.add("a.b.c"); - names.add("1.2.3"); + names.add("1.2.3.4"); List result = mapping.resolve(names); assertEquals(names.size(), result.size()); @@ -73,7 +73,7 @@ public class TestTableMapping { List names = new ArrayList(); names.add("a.b.c"); - names.add("1.2.3"); + names.add("1.2.3.4"); List result1 = mapping.resolve(names); assertEquals(names.size(), result1.size()); @@ -96,7 +96,7 @@ public class TestTableMapping { List names = new ArrayList(); names.add("a.b.c"); - names.add("1.2.3"); + names.add("1.2.3.4"); List result = mapping.resolve(names); assertEquals(names.size(), result.size()); @@ -114,7 +114,7 @@ public class TestTableMapping { List names = new ArrayList(); names.add("a.b.c"); - names.add("1.2.3"); + names.add("1.2.3.4"); List result = mapping.resolve(names); assertEquals(names.size(), result.size()); @@ -134,7 +134,7 @@ public class TestTableMapping { List names = new ArrayList(); names.add("a.b.c"); - names.add("1.2.3"); + names.add("1.2.3.4"); List result = mapping.resolve(names); assertEquals(names.size(), result.size()); From b75f0187c836aea2a95bac08e77f8802d7dbd4bd Mon Sep 17 00:00:00 2001 From: Todd Lipcon Date: Thu, 10 May 2012 00:30:53 +0000 Subject: [PATCH 44/70] HDFS-3341, HADOOP-8340. SNAPSHOT build versions should compare as less than their eventual release. Contributed by Todd Lipcon. 
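A short usage sketch of the comparison rule this change introduces (illustrative only, assuming the patched org.apache.hadoop.util.VersionUtil is on the classpath):

    import org.apache.hadoop.util.VersionUtil;

    public class SnapshotOrderingDemo {
      public static void main(String[] args) {
        // A SNAPSHOT build now sorts before the release it will become ...
        System.out.println(VersionUtil.compareVersions("2.0.0-SNAPSHOT", "2.0.0") < 0);  // true
        // ... while still sorting after earlier release lines.
        System.out.println(VersionUtil.compareVersions("1.0.1", "2.0.0-SNAPSHOT") < 0);  // true
        // Identical strings, SNAPSHOT or not, still compare as equal.
        System.out.println(VersionUtil.compareVersions("2.0.0-SNAPSHOT", "2.0.0-SNAPSHOT") == 0); // true
      }
    }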
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1336459 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 ++ .../org/apache/hadoop/util/VersionUtil.java | 33 ++++++++++++++++++- .../apache/hadoop/util/TestVersionUtil.java | 10 +++++- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 4 +-- 5 files changed, 49 insertions(+), 4 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index eeb320514f8..4a27999e9be 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -299,6 +299,9 @@ Release 2.0.0 - UNRELEASED HADOOP-8356. FileSystem service loading mechanism should print the FileSystem impl it is failing to load (tucu) + HADOOP-8340. SNAPSHOT build versions should compare as less than their eventual + final release. (todd) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java index dd68c4d74b1..09a272317f3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java @@ -22,11 +22,20 @@ import java.util.regex.Pattern; import org.apache.hadoop.classification.InterfaceAudience; +import com.google.common.collect.ComparisonChain; + @InterfaceAudience.Private public abstract class VersionUtil { private static final Pattern COMPONENT_GROUPS = Pattern.compile("(\\d+)|(\\D+)"); + /** + * Suffix added by maven for nightly builds and other snapshot releases. + * These releases are considered to precede the non-SNAPSHOT version + * with the same version number. + */ + private static final String SNAPSHOT_SUFFIX = "-SNAPSHOT"; + /** * This function splits the two versions on "." and performs a * naturally-ordered comparison of the resulting components. For example, the @@ -48,6 +57,11 @@ public abstract class VersionUtil { * between the two versions, then the version with fewer components is * considered to precede the version with more components. * + * In addition to the above rules, there is one special case: maven SNAPSHOT + * releases are considered to precede a non-SNAPSHOT release with an + * otherwise identical version number. For example, 2.0-SNAPSHOT precedes + * 2.0. + * * This function returns a negative integer if version1 precedes version2, a * positive integer if version2 precedes version1, and 0 if and only if the * two versions' components are identical in value and cardinality. @@ -61,6 +75,11 @@ public abstract class VersionUtil { * versions are equal. 
*/ public static int compareVersions(String version1, String version2) { + boolean isSnapshot1 = version1.endsWith(SNAPSHOT_SUFFIX); + boolean isSnapshot2 = version2.endsWith(SNAPSHOT_SUFFIX); + version1 = stripSnapshotSuffix(version1); + version2 = stripSnapshotSuffix(version2); + String[] version1Parts = version1.split("\\."); String[] version2Parts = version2.split("\\."); @@ -87,9 +106,21 @@ public abstract class VersionUtil { return component1.length() - component2.length(); } } - return version1Parts.length - version2Parts.length; + + return ComparisonChain.start() + .compare(version1Parts.length, version2Parts.length) + .compare(isSnapshot2, isSnapshot1) + .result(); } + private static String stripSnapshotSuffix(String version) { + if (version.endsWith(SNAPSHOT_SUFFIX)) { + return version.substring(0, version.length() - SNAPSHOT_SUFFIX.length()); + } else { + return version; + } + } + private static boolean isNumeric(String s) { try { Integer.parseInt(s); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java index a300cd25fb7..f01ae2f73d2 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java @@ -19,7 +19,6 @@ package org.apache.hadoop.util; import static org.junit.Assert.*; -import org.apache.hadoop.test.GenericTestUtils; import org.junit.Test; public class TestVersionUtil { @@ -30,6 +29,8 @@ public class TestVersionUtil { assertEquals(0, VersionUtil.compareVersions("2.0.0", "2.0.0")); assertEquals(0, VersionUtil.compareVersions("2.0.0a", "2.0.0a")); assertEquals(0, VersionUtil.compareVersions("1", "1")); + assertEquals(0, VersionUtil.compareVersions( + "2.0.0-SNAPSHOT", "2.0.0-SNAPSHOT")); // Assert that lower versions are lower, and higher versions are higher. assertExpectedValues("1", "2.0.0"); @@ -52,6 +53,13 @@ public class TestVersionUtil { assertExpectedValues("1.0.0a2", "1.0.0a10"); assertExpectedValues("1.0", "1.a"); assertExpectedValues("1.0", "1.a0"); + + // Snapshot builds precede their eventual releases. + assertExpectedValues("1.0-SNAPSHOT", "1.0"); + assertExpectedValues("1.0", "1.0.0-SNAPSHOT"); + assertExpectedValues("1.0.0-SNAPSHOT", "1.0.0"); + assertExpectedValues("1.0.0", "1.0.1-SNAPSHOT"); + assertExpectedValues("1.0.1-SNAPSHOT", "1.0.1"); } private static void assertExpectedValues(String lower, String higher) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index a88b273bb6d..261111e7504 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -438,6 +438,9 @@ Release 2.0.0 - UNRELEASED HDFS-3390. DFSAdmin should print full stack traces of errors when DEBUG logging is enabled. (atm) + HDFS-3341. Change minimum RPC versions to respective SNAPSHOTs instead of + final releases. (todd) + OPTIMIZATIONS HDFS-3024. 
Improve performance of stringification in addStoredBlock (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 2843d457f83..e037e2196a9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -145,7 +145,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY = "dfs.namenode.num.extra.edits.retained"; public static final int DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_DEFAULT = 1000000; //1M public static final String DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY = "dfs.namenode.min.supported.datanode.version"; - public static final String DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT = "3.0.0"; + public static final String DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT = "3.0.0-SNAPSHOT"; public static final String DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY = "dfs.namenode.edits.dir.minimum"; public static final int DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT = 1; @@ -263,7 +263,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final int DFS_DATANODE_IPC_DEFAULT_PORT = 50020; public static final String DFS_DATANODE_IPC_ADDRESS_DEFAULT = "0.0.0.0" + DFS_DATANODE_IPC_DEFAULT_PORT; public static final String DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY = "dfs.datanode.min.supported.namenode.version"; - public static final String DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT = "3.0.0"; + public static final String DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT = "3.0.0-SNAPSHOT"; public static final String DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY = "dfs.block.access.token.enable"; public static final boolean DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT = false; From a67054d7c3d3a90d5ea6088cc4fae31a05d7e9a0 Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Thu, 10 May 2012 04:52:46 +0000 Subject: [PATCH 45/70] HDFS-3328. NPE in DataNode.getIpcPort. Contributed by Eli Collins git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1336480 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java | 3 +-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 261111e7504..20763b554e9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -627,6 +627,8 @@ Release 2.0.0 - UNRELEASED HDFS-3376. DFSClient fails to make connection to DN if there are many unusable cached sockets (todd) + HDFS-3328. NPE in DataNode.getIpcPort. (eli) + BREAKDOWN OF HDFS-1623 SUBTASKS HDFS-2179. Add fencing framework and mechanisms for NameNode HA. 
(todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 6bb78df6bc8..fdcfa569705 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -860,7 +860,7 @@ public class DataNode extends Configured */ public String getDisplayName() { // NB: our DatanodeID may not be set yet - return hostName + ":" + getIpcPort(); + return hostName + ":" + getXferPort(); } /** @@ -877,7 +877,6 @@ public class DataNode extends Configured /** * @return the datanode's IPC port */ - @VisibleForTesting public int getIpcPort() { return ipcServer.getListenerAddress().getPort(); } From 1849fde096b3ffc0000756275816be1a1f59c8e3 Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Thu, 10 May 2012 06:16:01 +0000 Subject: [PATCH 46/70] HADOOP-7868. Hadoop native fails to compile when default linker option is -Wl,--as-needed. Contributed by Trevor Robinson git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1336491 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../hadoop-common/src/main/native/acinclude.m4 | 6 +++--- .../hadoop-common/src/main/native/configure.ac | 14 ++++++++++++-- 3 files changed, 18 insertions(+), 5 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 4a27999e9be..23ff1722729 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -496,6 +496,9 @@ Release 2.0.0 - UNRELEASED HADOOP-8172. Configuration no longer sets all keys in a deprecated key list. (Anupam Seth via bobby) + HADOOP-7868. Hadoop native fails to compile when default linker + option is -Wl,--as-needed. (Trevor Robinson via eli) + Release 0.23.3 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/native/acinclude.m4 b/hadoop-common-project/hadoop-common/src/main/native/acinclude.m4 index 3e2c013866a..93e05b8148d 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/acinclude.m4 +++ b/hadoop-common-project/hadoop-common/src/main/native/acinclude.m4 @@ -1,4 +1,4 @@ -# AC_COMPUTE_NEEDED_DSO(LIBRARY, PREPROC_SYMBOL) +# AC_COMPUTE_NEEDED_DSO(LIBRARY, TEST_PROGRAM, PREPROC_SYMBOL) # -------------------------------------------------- # Compute the 'actual' dynamic-library used # for LIBRARY and set it to PREPROC_SYMBOL @@ -6,7 +6,7 @@ AC_DEFUN([AC_COMPUTE_NEEDED_DSO], [ AC_CACHE_CHECK([Checking for the 'actual' dynamic-library for '-l$1'], ac_cv_libname_$1, [ - echo 'int main(int argc, char **argv){return 0;}' > conftest.c + echo '$2' > conftest.c if test -z "`${CC} ${LDFLAGS} -o conftest conftest.c -l$1 2>&1`"; then dnl Try objdump and ldd in that order to get the dynamic library if test ! 
-z "`which objdump | grep -v 'no objdump'`"; then @@ -24,5 +24,5 @@ AC_CACHE_CHECK([Checking for the 'actual' dynamic-library for '-l$1'], ac_cv_lib rm -f conftest* ] ) -AC_DEFINE_UNQUOTED($2, ${ac_cv_libname_$1}, [The 'actual' dynamic-library for '-l$1']) +AC_DEFINE_UNQUOTED($3, ${ac_cv_libname_$1}, [The 'actual' dynamic-library for '-l$1']) ])# AC_COMPUTE_NEEDED_DSO diff --git a/hadoop-common-project/hadoop-common/src/main/native/configure.ac b/hadoop-common-project/hadoop-common/src/main/native/configure.ac index 4f9e63100e7..34408d64182 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/configure.ac +++ b/hadoop-common-project/hadoop-common/src/main/native/configure.ac @@ -87,10 +87,20 @@ CPPFLAGS=$cppflags_bak AC_SUBST([JNI_CPPFLAGS]) dnl Check for zlib headers -AC_CHECK_HEADERS([zlib.h zconf.h], AC_COMPUTE_NEEDED_DSO(z,HADOOP_ZLIB_LIBRARY), AC_MSG_ERROR(Zlib headers were not found... native-hadoop library needs zlib to build. Please install the requisite zlib development package.)) +AC_CHECK_HEADERS([zlib.h zconf.h], + AC_COMPUTE_NEEDED_DSO(z, + [#include "zlib.h" + int main(int argc, char **argv){zlibVersion();return 0;}], + HADOOP_ZLIB_LIBRARY), + AC_MSG_ERROR(Zlib headers were not found... native-hadoop library needs zlib to build. Please install the requisite zlib development package.)) dnl Check for snappy headers -AC_CHECK_HEADERS([snappy-c.h], AC_COMPUTE_NEEDED_DSO(snappy,HADOOP_SNAPPY_LIBRARY), AC_MSG_WARN(Snappy headers were not found... building without snappy.)) +AC_CHECK_HEADERS([snappy-c.h], + AC_COMPUTE_NEEDED_DSO(snappy, + [#include "snappy-c.h" + int main(int argc, char **argv){snappy_compress(0,0,0,0);return 0;}], + HADOOP_SNAPPY_LIBRARY), + AC_MSG_WARN(Snappy headers were not found... building without snappy.)) dnl Check for headers needed by the native Group resolution implementation AC_CHECK_HEADERS([fcntl.h stdlib.h string.h unistd.h], [], AC_MSG_ERROR(Some system headers not found... please ensure their presence on your platform.)) From 54303465a6e351a11001346741188cbdeeebb87e Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Thu, 10 May 2012 06:28:56 +0000 Subject: [PATCH 47/70] HDFS-3396. FUSE build fails on Ubuntu 12.04. Contributed by Colin Patrick McCabe git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1336495 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../hadoop-hdfs/src/contrib/fuse-dfs/src/Makefile.am | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 20763b554e9..39ce0d7f4e8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -629,6 +629,8 @@ Release 2.0.0 - UNRELEASED HDFS-3328. NPE in DataNode.getIpcPort. (eli) + HDFS-3396. FUSE build fails on Ubuntu 12.04. (Colin Patrick McCabe via eli) + BREAKDOWN OF HDFS-1623 SUBTASKS HDFS-2179. Add fencing framework and mechanisms for NameNode HA. 
(todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/Makefile.am b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/Makefile.am index 85c81c226aa..706297f314e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/Makefile.am +++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/Makefile.am @@ -18,4 +18,5 @@ bin_PROGRAMS = fuse_dfs fuse_dfs_SOURCES = fuse_dfs.c fuse_options.c fuse_trash.c fuse_stat_struct.c fuse_users.c fuse_init.c fuse_connect.c fuse_impls_access.c fuse_impls_chmod.c fuse_impls_chown.c fuse_impls_create.c fuse_impls_flush.c fuse_impls_getattr.c fuse_impls_mkdir.c fuse_impls_mknod.c fuse_impls_open.c fuse_impls_read.c fuse_impls_release.c fuse_impls_readdir.c fuse_impls_rename.c fuse_impls_rmdir.c fuse_impls_statfs.c fuse_impls_symlink.c fuse_impls_truncate.c fuse_impls_utimens.c fuse_impls_unlink.c fuse_impls_write.c AM_CFLAGS= -Wall -g AM_CPPFLAGS= -DPERMS=$(PERMS) -D_FILE_OFFSET_BITS=64 -I$(JAVA_HOME)/include -I$(HADOOP_PREFIX)/../../src/main/native -I$(JAVA_HOME)/include/linux -D_FUSE_DFS_VERSION=\"$(PACKAGE_VERSION)\" -DPROTECTED_PATHS=\"$(PROTECTED_PATHS)\" -I$(FUSE_HOME)/include -AM_LDFLAGS= -L$(HADOOP_PREFIX)/../../target/native/target/usr/local/lib -lhdfs -L$(FUSE_HOME)/lib -lfuse -L$(JAVA_HOME)/jre/lib/$(OS_ARCH)/server -ljvm -lm +AM_LDFLAGS= -L$(HADOOP_PREFIX)/../../target/native/target/usr/local/lib64 -L$(HADOOP_PREFIX)/../../target/native/target/usr/local/lib -L$(FUSE_HOME)/lib -L$(JAVA_HOME)/jre/lib/$(OS_ARCH)/server +fuse_dfs_LDADD=-lfuse -lhdfs -ljvm -lm From f1ff05bf47a7dfb670bc63e4e6e58d74f6b5b4a7 Mon Sep 17 00:00:00 2001 From: Uma Maheswara Rao G Date: Thu, 10 May 2012 09:59:39 +0000 Subject: [PATCH 48/70] Reverting (Need to re-do the patch. new BlockInfo does not set iNode ) HDFS-3157. Error in deleting block is keep on coming from DN even after the block report and directory scanning has happened. 
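The stated reason for re-doing the patch is that a BlockInfo constructed on the fly from the reported block does not carry the owning-inode reference that the stored block had. A deliberately simplified sketch of that hazard, using hypothetical stand-in types rather than the real BlockInfo:

    // Hypothetical types, for illustration only: a record built from the
    // reported block alone has no owner reference, so later lookups through
    // it would fail.
    class Inode { }
    class BlockRecord {
      final long genStamp;
      final Inode inode; // null when the record is built without its owner
      BlockRecord(long genStamp, Inode inode) {
        this.genStamp = genStamp;
        this.inode = inode;
      }
    }
    public class RevertRationaleSketch {
      public static void main(String[] args) {
        BlockRecord stored = new BlockRecord(7L, new Inode());
        BlockRecord reportedCopy = new BlockRecord(9L, null); // owner link lost
        System.out.println(stored.inode != null);       // true
        System.out.println(reportedCopy.inode == null);  // true
      }
    }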
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1336572 13f79535-47bb-0310-9956-ffa450edef68 --- .../server/blockmanagement/BlockManager.java | 6 +- .../TestRBWBlockInvalidation.java | 104 ------------------ .../server/datanode/DataNodeTestUtils.java | 5 - .../fsdataset/impl/FsDatasetTestUtil.java | 6 - 4 files changed, 2 insertions(+), 119 deletions(-) delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index d552eab42e0..a3c2761eaa6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -1802,8 +1802,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block case COMPLETE: case COMMITTED: if (storedBlock.getGenerationStamp() != iblk.getGenerationStamp()) { - return new BlockToMarkCorrupt(new BlockInfo(iblk, storedBlock - .getINode().getReplication()), + return new BlockToMarkCorrupt(storedBlock, "block is " + ucState + " and reported genstamp " + iblk.getGenerationStamp() + " does not match " + "genstamp in block map " + storedBlock.getGenerationStamp()); @@ -1823,8 +1822,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block if (!storedBlock.isComplete()) { return null; // not corrupt } else if (storedBlock.getGenerationStamp() != iblk.getGenerationStamp()) { - return new BlockToMarkCorrupt(new BlockInfo(iblk, storedBlock - .getINode().getReplication()), + return new BlockToMarkCorrupt(storedBlock, "reported " + reportedState + " replica with genstamp " + iblk.getGenerationStamp() + " does not match COMPLETE block's " + "genstamp in block map " + storedBlock.getGenerationStamp()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java deleted file mode 100644 index e45dd6a0a62..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java +++ /dev/null @@ -1,104 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdfs.server.blockmanagement; - -import java.io.File; -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.DFSTestUtil; -import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; -import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; - -import org.junit.Test; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -/** - * Test when RBW block is removed. Invalidation of the corrupted block happens - * and then the under replicated block gets replicated to the datanode. - */ -public class TestRBWBlockInvalidation { - private static NumberReplicas countReplicas(final FSNamesystem namesystem, - ExtendedBlock block) { - return namesystem.getBlockManager().countNodes(block.getLocalBlock()); - } - - /** - * Test when a block's replica is removed from RBW folder in one of the - * datanode, namenode should ask to invalidate that corrupted block and - * schedule replication for one more replica for that under replicated block. - */ - @Test - public void testBlockInvalidationWhenRBWReplicaMissedInDN() - throws IOException, InterruptedException { - Configuration conf = new HdfsConfiguration(); - conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 300); - conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1); - conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1); - MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3) - .build(); - FSDataOutputStream out = null; - try { - final FSNamesystem namesystem = cluster.getNamesystem(); - FileSystem fs = cluster.getFileSystem(); - Path testPath = new Path(MiniDFSCluster.getBaseDirectory(), "foo1"); - out = fs.create(testPath, (short) 3); - out.writeBytes("HDFS-3157: " + testPath); - out.hsync(); - String bpid = namesystem.getBlockPoolId(); - ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, testPath); - Block block = blk.getLocalBlock(); - // Deleting partial block and its meta information from the RBW folder - // of first datanode. - DataNode dn = cluster.getDataNodes().get(0); - File blockFile = DataNodeTestUtils.getBlockFile(dn, bpid, block); - File metaFile = DataNodeTestUtils.getMetaFile(dn, bpid, block); - assertTrue("Could not delete the block file from the RBW folder", - blockFile.delete()); - assertTrue("Could not delete the block meta file from the RBW folder", - metaFile.delete()); - out.close(); - assertEquals("The corrupt replica could not be invalidated", 0, - countReplicas(namesystem, blk).corruptReplicas()); - /* - * Sleep for 3 seconds, for under replicated block to get replicated. As - * one second will be taken by ReplicationMonitor and one more second for - * invalidated block to get deleted from the datanode. 
- */ - Thread.sleep(3000); - blk = DFSTestUtil.getFirstBlock(fs, testPath); - assertEquals("There should be three live replicas", 3, - countReplicas(namesystem, blk).liveReplicas()); - } finally { - if (out != null) { - out.close(); - } - cluster.shutdown(); - } - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java index 7baa47a490e..74be37d986b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java @@ -136,11 +136,6 @@ public class DataNodeTestUtils { ) throws IOException { return FsDatasetTestUtil.getBlockFile(dn.getFSDataset(), bpid, b); } - - public static File getMetaFile(DataNode dn, String bpid, Block b) - throws IOException { - return FsDatasetTestUtil.getMetaFile(dn.getFSDataset(), bpid, b); - } public static boolean unlinkBlock(DataNode dn, ExtendedBlock bk, int numLinks ) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java index 05a2cec906c..211737fa73e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java @@ -36,12 +36,6 @@ public class FsDatasetTestUtil { ) throws IOException { return ((FsDatasetImpl)fsd).getBlockFile(bpid, b); } - - public static File getMetaFile(FsDatasetSpi fsd, String bpid, Block b) - throws IOException { - return FsDatasetUtil.getMetaFile(getBlockFile(fsd, bpid, b), b - .getGenerationStamp()); - } public static boolean unlinkBlock(FsDatasetSpi fsd, ExtendedBlock block, int numLinks) throws IOException { From f5960f71d9dcdc237f08e41bf1ebfbd572ca83af Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Thu, 10 May 2012 13:49:06 +0000 Subject: [PATCH 49/70] HDFS-3395. NN doesn't start with HA+security enabled and HTTP address set to 0.0.0.0. Contributed by Aaron T. Myers. 
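For context on the fix: DFSUtil.substituteForWildcardAddress (modified in the hunk below) swaps a wildcard host in a configured address for a concrete default host. A minimal sketch of a call, with placeholder host names; the guard relaxed by this patch only matters when security is enabled, where previously any wildcard configured address triggered the IOException even when a usable, non-wildcard default host was available:

    import java.io.IOException;
    import org.apache.hadoop.hdfs.DFSUtil;

    public class WildcardSubstitutionSketch {
      public static void main(String[] args) throws IOException {
        // Placeholder values: an HTTP address left as a wildcard in the config,
        // and a concrete host to substitute for it (e.g. the NN's own hostname).
        String configured  = "0.0.0.0:50070";
        String defaultHost = "localhost";
        // Expected to print the default host combined with the configured port.
        // With security enabled, the patched code throws only if defaultHost is
        // itself a wildcard address, rather than unconditionally as before.
        System.out.println(
            DFSUtil.substituteForWildcardAddress(configured, defaultHost));
      }
    }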
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1336690 13f79535-47bb-0310-9956-ffa450edef68 --- .../src/main/java/org/apache/hadoop/net/NetUtils.java | 4 ++-- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../src/main/java/org/apache/hadoop/hdfs/DFSUtil.java | 5 ++++- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java index b3c77c20336..ac48a08da72 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java @@ -140,7 +140,7 @@ public class NetUtils { /** * Util method to build socket addr from either: - * : + * : * ://:/ */ public static InetSocketAddress createSocketAddr(String target) { @@ -150,7 +150,7 @@ public class NetUtils { /** * Util method to build socket addr from either: * - * : + * : * ://:/ */ public static InetSocketAddress createSocketAddr(String target, diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 39ce0d7f4e8..b867df14a89 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -631,6 +631,9 @@ Release 2.0.0 - UNRELEASED HDFS-3396. FUSE build fails on Ubuntu 12.04. (Colin Patrick McCabe via eli) + HDFS-3395. NN doesn't start with HA+security enabled and HTTP address + set to 0.0.0.0. (atm) + BREAKDOWN OF HDFS-1623 SUBTASKS HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index 9e244a25315..dccc4e2ee0b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -714,8 +714,11 @@ public class DFSUtil { public static String substituteForWildcardAddress(String configuredAddress, String defaultHost) throws IOException { InetSocketAddress sockAddr = NetUtils.createSocketAddr(configuredAddress); + InetSocketAddress defaultSockAddr = NetUtils.createSocketAddr(defaultHost + + ":0"); if (sockAddr.getAddress().isAnyLocalAddress()) { - if(UserGroupInformation.isSecurityEnabled()) { + if (UserGroupInformation.isSecurityEnabled() && + defaultSockAddr.getAddress().isAnyLocalAddress()) { throw new IOException("Cannot use a wildcard address with security. " + "Must explicitly set bind address for Kerberos"); } From 6a0865440e335dd306cba12f97fad703bb445216 Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Thu, 10 May 2012 17:58:55 +0000 Subject: [PATCH 50/70] HDFS-3230. Cleanup DatanodeID creation in the tests. 
Contributed by Eli Collins git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1336815 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../org/apache/hadoop/hdfs/web/JsonUtil.java | 1 - .../org/apache/hadoop/hdfs/DFSTestUtil.java | 18 ++++- .../hadoop/hdfs/TestDFSClientRetries.java | 3 +- .../hdfs/TestReplaceDatanodeOnFailure.java | 2 +- .../hadoop/hdfs/protocolPB/TestPBHelper.java | 25 +++--- .../security/token/block/TestBlockToken.java | 4 +- .../blockmanagement/TestBlockManager.java | 49 ++++++------ .../blockmanagement/TestHost2NodesMap.java | 72 ++++++++--------- .../TestPendingDataNodeMessages.java | 11 ++- .../TestReplicationPolicy.java | 78 ++++++++++--------- .../hadoop/net/TestNetworkTopology.java | 57 ++++++++------ 12 files changed, 179 insertions(+), 143 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index b867df14a89..8885859aa04 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -634,6 +634,8 @@ Release 2.0.0 - UNRELEASED HDFS-3395. NN doesn't start with HA+security enabled and HTTP address set to 0.0.0.0. (atm) + HDFS-3230. Cleanup DatanodeID creation in the tests. (eli) + BREAKDOWN OF HDFS-1623 SUBTASKS HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java index 9218078a482..912f362728c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java @@ -40,7 +40,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; -import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 7c8a02e513e..eded4667dca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -706,13 +706,23 @@ public class DFSTestUtil { .join(nameservices)); } + public static DatanodeID getLocalDatanodeID() { + return new DatanodeID("127.0.0.1", "localhost", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT); + } + + public static DatanodeID getLocalDatanodeID(int port) { + return new DatanodeID("127.0.0.1", "localhost", "", port, port, port); + } + public static DatanodeDescriptor getLocalDatanodeDescriptor() { - return new DatanodeDescriptor( - new DatanodeID("127.0.0.1", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT)); + return new DatanodeDescriptor(getLocalDatanodeID()); } public static DatanodeInfo getLocalDatanodeInfo() { - return new DatanodeInfo( - new DatanodeID("127.0.0.1", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT)); + return new DatanodeInfo(getLocalDatanodeID()); + } + + public static DatanodeInfo getLocalDatanodeInfo(int port) { + return new 
DatanodeInfo(getLocalDatanodeID(port)); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java index 86bef8e1ee7..b675f09049b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java @@ -627,8 +627,7 @@ public class TestDFSClientRetries extends TestCase { server.start(); final InetSocketAddress addr = NetUtils.getConnectAddress(server); - DatanodeID fakeDnId = new DatanodeID( - "localhost", "localhost", "fake-storage", addr.getPort(), 0, addr.getPort()); + DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort()); ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L)); LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java index 9841dc8700e..9a7504a0508 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java @@ -62,7 +62,7 @@ public class TestReplaceDatanodeOnFailure { final DatanodeInfo[][] datanodes = new DatanodeInfo[infos.length + 1][]; datanodes[0] = new DatanodeInfo[0]; for(int i = 0; i < infos.length; ) { - infos[i] = new DatanodeInfo(new DatanodeID("dn" + i, 100)); + infos[i] = DFSTestUtil.getLocalDatanodeInfo(50020 + i); i++; datanodes[i] = new DatanodeInfo[i]; System.arraycopy(infos, 0, datanodes[i], 0, datanodes[i].length); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java index a6280d319aa..6a8d687a3aa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java @@ -131,7 +131,7 @@ public class TestPBHelper { @Test public void testConvertDatanodeID() { - DatanodeID dn = new DatanodeID("node", "node", "sid", 1, 2, 3); + DatanodeID dn = DFSTestUtil.getLocalDatanodeID(); DatanodeIDProto dnProto = PBHelper.convert(dn); DatanodeID dn2 = PBHelper.convert(dnProto); compare(dn, dn2); @@ -280,10 +280,6 @@ public class TestPBHelper { return new ExtendedBlock("bpid", blkid, 100, 2); } - private DatanodeInfo getDNInfo() { - return new DatanodeInfo(new DatanodeID("node", "node", "sid", 0, 1, 2)); - } - private void compare(DatanodeInfo dn1, DatanodeInfo dn2) { assertEquals(dn1.getAdminState(), dn2.getAdminState()); assertEquals(dn1.getBlockPoolUsed(), dn2.getBlockPoolUsed()); @@ -316,7 +312,9 @@ public class TestPBHelper { @Test public void testConvertRecoveringBlock() { - DatanodeInfo[] dnInfo = new DatanodeInfo[] { getDNInfo(), getDNInfo() }; + DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo(); + DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo(); + DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 }; RecoveringBlock b = new RecoveringBlock(getExtendedBlock(), dnInfo, 3); RecoveringBlockProto bProto = PBHelper.convert(b); RecoveringBlock b1 = 
PBHelper.convert(bProto); @@ -330,7 +328,9 @@ public class TestPBHelper { @Test public void testConvertBlockRecoveryCommand() { - DatanodeInfo[] dnInfo = new DatanodeInfo[] { getDNInfo(), getDNInfo() }; + DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo(); + DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo(); + DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 }; List blks = ImmutableList.of( new RecoveringBlock(getExtendedBlock(1), dnInfo, 3), @@ -401,11 +401,14 @@ public class TestPBHelper { @Test public void testConvertLocatedBlock() { DatanodeInfo [] dnInfos = new DatanodeInfo[3]; - dnInfos[0] = new DatanodeInfo("host0", "host0", "0", 5000, 5001, 5002, 20000, 10001, 9999, + dnInfos[0] = new DatanodeInfo("127.0.0.1", "host1", "0", + 5000, 5001, 5002, 20000, 10001, 9999, 59, 69, 32, "local", AdminStates.DECOMMISSION_INPROGRESS); - dnInfos[1] = new DatanodeInfo("host1", "host1", "1", 5000, 5001, 5002, 20000, 10001, 9999, + dnInfos[1] = new DatanodeInfo("127.0.0.1", "host2", "1", + 5000, 5001, 5002, 20000, 10001, 9999, 59, 69, 32, "local", AdminStates.DECOMMISSIONED); - dnInfos[2] = new DatanodeInfo("host2", "host2", "2", 5000, 5001, 5002, 20000, 10001, 9999, + dnInfos[2] = new DatanodeInfo("127.0.0.1", "host3", "2", + 5000, 5001, 5002, 20000, 10001, 9999, 59, 69, 32, "local", AdminStates.NORMAL); LocatedBlock lb = new LocatedBlock( new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false); @@ -424,7 +427,7 @@ public class TestPBHelper { @Test public void testConvertDatanodeRegistration() { - DatanodeID dnId = new DatanodeID("host", "host", "xyz", 0, 1, 0); + DatanodeID dnId = DFSTestUtil.getLocalDatanodeID(); BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) }; ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10, getBlockKey(1), keys); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java index ea335d26120..bf2c33815bf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java @@ -42,6 +42,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -279,8 +280,7 @@ public class TestBlockToken { server.start(); final InetSocketAddress addr = NetUtils.getConnectAddress(server); - DatanodeID fakeDnId = new DatanodeID("localhost", - "localhost", "fake-storage", addr.getPort(), 0, addr.getPort()); + DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort()); ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L)); LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index a6e8c4f05b8..f13b2779fad 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -47,17 +47,10 @@ import com.google.common.collect.LinkedListMultimap; import com.google.common.collect.Lists; public class TestBlockManager { - private final List nodes = ImmutableList.of( - new DatanodeDescriptor(new DatanodeID("h1", 5020), "/rackA"), - new DatanodeDescriptor(new DatanodeID("h2", 5020), "/rackA"), - new DatanodeDescriptor(new DatanodeID("h3", 5020), "/rackA"), - new DatanodeDescriptor(new DatanodeID("h4", 5020), "/rackB"), - new DatanodeDescriptor(new DatanodeID("h5", 5020), "/rackB"), - new DatanodeDescriptor(new DatanodeID("h6", 5020), "/rackB") - ); - private final List rackA = nodes.subList(0, 3); - private final List rackB = nodes.subList(3, 6); - + private List nodes; + private List rackA; + private List rackB; + /** * Some of these tests exercise code which has some randomness involved - * ie even if there's a bug, they may pass because the random node selection @@ -82,6 +75,16 @@ public class TestBlockManager { fsn = Mockito.mock(FSNamesystem.class); Mockito.doReturn(true).when(fsn).hasWriteLock(); bm = new BlockManager(fsn, fsn, conf); + nodes = ImmutableList.of( + new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/rackA"), + new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/rackA"), + new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/rackA"), + new DatanodeDescriptor(new DatanodeID("4.4.4.4", 5020), "/rackB"), + new DatanodeDescriptor(new DatanodeID("5.5.5.5", 5020), "/rackB"), + new DatanodeDescriptor(new DatanodeID("6.6.6.6", 5020), "/rackB") + ); + rackA = nodes.subList(0, 3); + rackB = nodes.subList(3, 6); } private void addNodes(Iterable nodesToAdd) { @@ -116,7 +119,7 @@ public class TestBlockManager { } private void doBasicTest(int testIndex) { - List origNodes = nodes(0, 1); + List origNodes = getNodes(0, 1); BlockInfo blockInfo = addBlockOnNodes((long)testIndex, origNodes); DatanodeDescriptor[] pipeline = scheduleSingleReplication(blockInfo); @@ -147,7 +150,7 @@ public class TestBlockManager { private void doTestTwoOfThreeNodesDecommissioned(int testIndex) throws Exception { // Block originally on A1, A2, B1 - List origNodes = nodes(0, 1, 3); + List origNodes = getNodes(0, 1, 3); BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes); // Decommission two of the nodes (A1, A2) @@ -157,7 +160,7 @@ public class TestBlockManager { assertTrue("Source of replication should be one of the nodes the block " + "was on. 
Was: " + pipeline[0], origNodes.contains(pipeline[0])); - assertEquals("Should have two targets", 3, pipeline.length); + assertEquals("Should have three targets", 3, pipeline.length); boolean foundOneOnRackA = false; for (int i = 1; i < pipeline.length; i++) { @@ -190,7 +193,7 @@ public class TestBlockManager { private void doTestAllNodesHoldingReplicasDecommissioned(int testIndex) throws Exception { // Block originally on A1, A2, B1 - List origNodes = nodes(0, 1, 3); + List origNodes = getNodes(0, 1, 3); BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes); // Decommission all of the nodes @@ -242,7 +245,7 @@ public class TestBlockManager { private void doTestOneOfTwoRacksDecommissioned(int testIndex) throws Exception { // Block originally on A1, A2, B1 - List origNodes = nodes(0, 1, 3); + List origNodes = getNodes(0, 1, 3); BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes); // Decommission all of the nodes in rack A @@ -252,7 +255,7 @@ public class TestBlockManager { assertTrue("Source of replication should be one of the nodes the block " + "was on. Was: " + pipeline[0], origNodes.contains(pipeline[0])); - assertEquals("Should have 2 targets", 3, pipeline.length); + assertEquals("Should have three targets", 3, pipeline.length); boolean foundOneOnRackB = false; for (int i = 1; i < pipeline.length; i++) { @@ -273,7 +276,8 @@ public class TestBlockManager { // the block is still under-replicated. Add a new node. This should allow // the third off-rack replica. - DatanodeDescriptor rackCNode = new DatanodeDescriptor(new DatanodeID("h7", 100), "/rackC"); + DatanodeDescriptor rackCNode = + new DatanodeDescriptor(new DatanodeID("7.7.7.7", 100), "/rackC"); addNodes(ImmutableList.of(rackCNode)); try { DatanodeDescriptor[] pipeline2 = scheduleSingleReplication(blockInfo); @@ -359,7 +363,7 @@ public class TestBlockManager { return blockInfo; } - private List nodes(int ... indexes) { + private List getNodes(int ... indexes) { List ret = Lists.newArrayList(); for (int idx : indexes) { ret.add(nodes.get(idx)); @@ -368,7 +372,7 @@ public class TestBlockManager { } private List startDecommission(int ... 
indexes) { - List nodes = nodes(indexes); + List nodes = getNodes(indexes); for (DatanodeDescriptor node : nodes) { node.startDecommission(); } @@ -404,8 +408,9 @@ public class TestBlockManager { LinkedListMultimap repls = getAllPendingReplications(); assertEquals(1, repls.size()); - Entry repl = repls.entries() - .iterator().next(); + Entry repl = + repls.entries().iterator().next(); + DatanodeDescriptor[] targets = repl.getValue().targets; DatanodeDescriptor[] pipeline = new DatanodeDescriptor[1 + targets.length]; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java index 08607093dbe..44fa7c24a5a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java @@ -18,73 +18,75 @@ package org.apache.hadoop.hdfs.server.blockmanagement; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - import org.apache.hadoop.hdfs.protocol.DatanodeID; + import org.junit.Before; import org.junit.Test; +import static org.junit.Assert.*; + public class TestHost2NodesMap { private Host2NodesMap map = new Host2NodesMap(); - private final DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] { - new DatanodeDescriptor(new DatanodeID("ip1", "h1", "", 5020, -1, -1), "/d1/r1"), - new DatanodeDescriptor(new DatanodeID("ip2", "h1", "", 5020, -1, -1), "/d1/r1"), - new DatanodeDescriptor(new DatanodeID("ip3", "h1", "", 5020, -1, -1), "/d1/r2"), - new DatanodeDescriptor(new DatanodeID("ip3", "h1", "", 5030, -1, -1), "/d1/r2"), - }; - private final DatanodeDescriptor NULL_NODE = null; - private final DatanodeDescriptor NODE = new DatanodeDescriptor(new DatanodeID("h3", 5040), - "/d1/r4"); - + private DatanodeDescriptor dataNodes[]; + @Before public void setup() { - for(DatanodeDescriptor node:dataNodes) { + dataNodes = new DatanodeDescriptor[] { + new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/d1/r1"), + new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/d1/r1"), + new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/d1/r2"), + new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5030), "/d1/r2"), + }; + for (DatanodeDescriptor node : dataNodes) { map.add(node); } - map.add(NULL_NODE); + map.add(null); } @Test public void testContains() throws Exception { - for(int i=0; i q = msgs.takeBlockQueue(block1Gs2DifferentInstance); assertEquals( - "ReportedBlockInfo [block=blk_1_1, dn=fake:100, reportedState=FINALIZED]," + - "ReportedBlockInfo [block=blk_1_2, dn=fake:100, reportedState=FINALIZED]", + "ReportedBlockInfo [block=blk_1_1, dn=127.0.0.1:50010, reportedState=FINALIZED]," + + "ReportedBlockInfo [block=blk_1_2, dn=127.0.0.1:50010, reportedState=FINALIZED]", Joiner.on(",").join(q)); assertEquals(0, msgs.count()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java index 49925ab885a..9ce24a7fae6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement; import static org.junit.Assert.*; -import java.io.IOException; +import java.io.File; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -39,54 +39,55 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.Node; +import org.junit.BeforeClass; import org.junit.Test; public class TestReplicationPolicy { - private Random random= DFSUtil.getRandom(); + private Random random = DFSUtil.getRandom(); private static final int BLOCK_SIZE = 1024; private static final int NUM_OF_DATANODES = 6; - private static final Configuration CONF = new HdfsConfiguration(); - private static final NetworkTopology cluster; - private static final NameNode namenode; - private static final BlockPlacementPolicy replicator; + private static NetworkTopology cluster; + private static NameNode namenode; + private static BlockPlacementPolicy replicator; private static final String filename = "/dummyfile.txt"; - private static final DatanodeDescriptor dataNodes[] = - new DatanodeDescriptor[] { - new DatanodeDescriptor(new DatanodeID("h1", 5020), "/d1/r1"), - new DatanodeDescriptor(new DatanodeID("h2", 5020), "/d1/r1"), - new DatanodeDescriptor(new DatanodeID("h3", 5020), "/d1/r2"), - new DatanodeDescriptor(new DatanodeID("h4", 5020), "/d1/r2"), - new DatanodeDescriptor(new DatanodeID("h5", 5020), "/d2/r3"), - new DatanodeDescriptor(new DatanodeID("h6", 5020), "/d2/r3") - }; - - private final static DatanodeDescriptor NODE = - new DatanodeDescriptor(new DatanodeID("h7", 5020), "/d2/r4"); - - static { - try { - FileSystem.setDefaultUri(CONF, "hdfs://localhost:0"); - CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0"); - DFSTestUtil.formatNameNode(CONF); - namenode = new NameNode(CONF); - } catch (IOException e) { - e.printStackTrace(); - throw (RuntimeException)new RuntimeException().initCause(e); - } + private static DatanodeDescriptor dataNodes[]; + + @BeforeClass + public static void setupCluster() throws Exception { + Configuration conf = new HdfsConfiguration(); + dataNodes = new DatanodeDescriptor[] { + new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/d1/r1"), + new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/d1/r1"), + new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/d1/r2"), + new DatanodeDescriptor(new DatanodeID("4.4.4.4", 5020), "/d1/r2"), + new DatanodeDescriptor(new DatanodeID("5.5.5.5", 5020), "/d2/r3"), + new DatanodeDescriptor(new DatanodeID("6.6.6.6", 5020), "/d2/r3") + }; + + FileSystem.setDefaultUri(conf, "hdfs://localhost:0"); + conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0"); + File baseDir = new File(System.getProperty( + "test.build.data", "build/test/data"), "dfs/"); + conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, + new File(baseDir, "name").getPath()); + + DFSTestUtil.formatNameNode(conf); + namenode = new NameNode(conf); + final BlockManager bm = namenode.getNamesystem().getBlockManager(); replicator = bm.getBlockPlacementPolicy(); cluster = bm.getDatanodeManager().getNetworkTopology(); // construct network topology - for(int i=0; i frequency = pickNodesAtRandom(100, scope); @@ -186,6 +198,7 @@ public class TestNetworkTopology extends TestCase { /** * This test checks that chooseRandom 
works for an excluded rack. */ + @Test public void testChooseRandomExcludedRack() { Map frequency = pickNodesAtRandom(100, "~" + "/d2"); // all the nodes on the second rack should be zero From 6957c7806603edcce99b6aba2e3f402b186fb04a Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Thu, 10 May 2012 21:09:08 +0000 Subject: [PATCH 51/70] HADOOP-8375. test-patch should stop immediately once it has found compilation errors (bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1336887 13f79535-47bb-0310-9956-ffa450edef68 --- dev-support/test-patch.sh | 14 ++++++++++---- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 90757a1d8b5..2d6bcfebd4e 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -423,8 +423,8 @@ checkJavacWarnings () { if [[ $? != 0 ]] ; then JIRA_COMMENT="$JIRA_COMMENT - -1 javac. The patch appears to cause tar ant target to fail." - return 1 + -1 javac. The patch appears to cause the build to fail." + return 2 fi ### Compare trunk and patch javac warning numbers if [[ -f $PATCH_DIR/patchJavacWarnings.txt ]] ; then @@ -900,9 +900,15 @@ if [[ $? != 0 ]] ; then submitJiraComment 1 cleanupAndExit 1 fi -checkJavadocWarnings -(( RESULT = RESULT + $? )) checkJavacWarnings +JAVAC_RET=$? +#2 is returned if the code could not compile +if [[ $JAVAC_RET == 2 ]] ; then + submitJiraComment 1 + cleanupAndExit 1 +fi +(( RESULT = RESULT + $JAVAC_RET )) +checkJavadocWarnings (( RESULT = RESULT + $? )) checkEclipseGeneration (( RESULT = RESULT + $? )) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 23ff1722729..d849db5d0d6 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -132,6 +132,9 @@ Trunk (unreleased changes) HADOOP-8354. test-patch findbugs may fail if a dependent module is changed (Tom White and Robert Evans) + HADOOP-8375. test-patch should stop immediately once it has found + compilation errors (bobby) + OPTIMIZATIONS HADOOP-7761. Improve the performance of raw comparisons. (todd) From 7e8e983620f3ae3462d115972707c72b7d9cbabd Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Thu, 10 May 2012 21:41:25 +0000 Subject: [PATCH 52/70] HDFS-3369. Rename {get|set|add}INode(..) methods in BlockManager and BlocksMap to {get|set|add}BlockCollection(..). 
Contributed by John George git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1336909 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../server/blockmanagement/BlockInfo.java | 20 +-- .../server/blockmanagement/BlockManager.java | 132 +++++++++--------- .../blockmanagement/BlockPlacementPolicy.java | 12 +- .../BlockPlacementPolicyDefault.java | 2 +- .../server/blockmanagement/BlocksMap.java | 16 +-- .../hdfs/server/namenode/FSDirectory.java | 6 +- .../hdfs/server/namenode/FSEditLogLoader.java | 2 +- .../hdfs/server/namenode/FSNamesystem.java | 8 +- .../hdfs/server/namenode/INodeFile.java | 4 +- .../namenode/INodeFileUnderConstruction.java | 2 +- .../server/namenode/NamenodeJspHelper.java | 2 +- .../blockmanagement/TestBlockManager.java | 2 +- .../BlockPlacementPolicyRaid.java | 28 ++-- .../TestBlockPlacementPolicyRaid.java | 26 ++-- 15 files changed, 134 insertions(+), 131 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 8885859aa04..7b25112e2c0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -441,6 +441,9 @@ Release 2.0.0 - UNRELEASED HDFS-3341. Change minimum RPC versions to respective SNAPSHOTs instead of final releases. (todd) + HDFS-3369. Rename {get|set|add}INode(..) methods in BlockManager and + BlocksMap to {get|set|add}BlockCollection(..). (John George via szetszwo) + OPTIMIZATIONS HDFS-3024. Improve performance of stringification in addStoredBlock (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java index 44089478f3b..be86b536c3b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java @@ -32,7 +32,7 @@ import org.apache.hadoop.hdfs.util.LightWeightGSet; @InterfaceAudience.Private public class BlockInfo extends Block implements LightWeightGSet.LinkedElement { - private BlockCollection inode; + private BlockCollection bc; /** For implementing {@link LightWeightGSet.LinkedElement} interface */ private LightWeightGSet.LinkedElement nextLinkedElement; @@ -57,13 +57,13 @@ public class BlockInfo extends Block implements */ public BlockInfo(int replication) { this.triplets = new Object[3*replication]; - this.inode = null; + this.bc = null; } public BlockInfo(Block blk, int replication) { super(blk); this.triplets = new Object[3*replication]; - this.inode = null; + this.bc = null; } /** @@ -72,16 +72,16 @@ public class BlockInfo extends Block implements * @param from BlockInfo to copy from. 
*/ protected BlockInfo(BlockInfo from) { - this(from, from.inode.getReplication()); - this.inode = from.inode; + this(from, from.bc.getReplication()); + this.bc = from.bc; } - public BlockCollection getINode() { - return inode; + public BlockCollection getBlockCollection() { + return bc; } - public void setINode(BlockCollection inode) { - this.inode = inode; + public void setBlockCollection(BlockCollection bc) { + this.bc = bc; } DatanodeDescriptor getDatanode(int index) { @@ -334,7 +334,7 @@ public class BlockInfo extends Block implements BlockUCState s, DatanodeDescriptor[] targets) { if(isComplete()) { return new BlockInfoUnderConstruction( - this, getINode().getReplication(), s, targets); + this, getBlockCollection().getReplication(), s, targets); } // the block is already under construction BlockInfoUnderConstruction ucBlock = (BlockInfoUnderConstruction)this; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index a3c2761eaa6..1568e23ac51 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -140,7 +140,7 @@ public class BlockManager { private final long replicationRecheckInterval; /** - * Mapping: Block -> { INode, datanodes, self ref } + * Mapping: Block -> { BlockCollection, datanodes, self ref } * Updated only in response to client-sent information. */ final BlocksMap blocksMap; @@ -190,7 +190,7 @@ public class BlockManager { public final short minReplication; /** Default number of replicas */ public final int defaultReplication; - /** The maximum number of entries returned by getCorruptInodes() */ + /** value returned by MAX_CORRUPT_FILES_RETURNED */ final int maxCorruptFilesReturned; /** variable to enable check for enough racks */ @@ -382,7 +382,7 @@ public class BlockManager { numReplicas.decommissionedReplicas(); if (block instanceof BlockInfo) { - String fileName = ((BlockInfo)block).getINode().getName(); + String fileName = ((BlockInfo)block).getBlockCollection().getName(); out.print(fileName + ": "); } // l: == live:, d: == decommissioned c: == corrupt e: == excess @@ -452,17 +452,17 @@ public class BlockManager { * Commit the last block of the file and mark it as complete if it has * meets the minimum replication requirement * - * @param fileINode file inode + * @param bc block collection * @param commitBlock - contains client reported block length and generation * @return true if the last block is changed to committed state. * @throws IOException if the block does not have at least a minimal number * of replicas reported from data-nodes. 
*/ - public boolean commitOrCompleteLastBlock(MutableBlockCollection fileINode, + public boolean commitOrCompleteLastBlock(MutableBlockCollection bc, Block commitBlock) throws IOException { if(commitBlock == null) return false; // not committing, this is a block allocation retry - BlockInfo lastBlock = fileINode.getLastBlock(); + BlockInfo lastBlock = bc.getLastBlock(); if(lastBlock == null) return false; // no blocks in file yet if(lastBlock.isComplete()) @@ -470,22 +470,22 @@ public class BlockManager { final boolean b = commitBlock((BlockInfoUnderConstruction)lastBlock, commitBlock); if(countNodes(lastBlock).liveReplicas() >= minReplication) - completeBlock(fileINode, fileINode.numBlocks()-1, false); + completeBlock(bc, bc.numBlocks()-1, false); return b; } /** * Convert a specified block of the file to a complete block. - * @param fileINode file + * @param bc file * @param blkIndex block index in the file * @throws IOException if the block does not have at least a minimal number * of replicas reported from data-nodes. */ - private BlockInfo completeBlock(final MutableBlockCollection fileINode, + private BlockInfo completeBlock(final MutableBlockCollection bc, final int blkIndex, boolean force) throws IOException { if(blkIndex < 0) return null; - BlockInfo curBlock = fileINode.getBlocks()[blkIndex]; + BlockInfo curBlock = bc.getBlocks()[blkIndex]; if(curBlock.isComplete()) return curBlock; BlockInfoUnderConstruction ucBlock = (BlockInfoUnderConstruction)curBlock; @@ -498,7 +498,7 @@ public class BlockManager { "Cannot complete block: block has not been COMMITTED by the client"); BlockInfo completeBlock = ucBlock.convertToCompleteBlock(); // replace penultimate block in file - fileINode.setBlock(blkIndex, completeBlock); + bc.setBlock(blkIndex, completeBlock); // Since safe-mode only counts complete blocks, and we now have // one more complete block, we need to adjust the total up, and @@ -514,12 +514,12 @@ public class BlockManager { return blocksMap.replaceBlock(completeBlock); } - private BlockInfo completeBlock(final MutableBlockCollection fileINode, + private BlockInfo completeBlock(final MutableBlockCollection bc, final BlockInfo block, boolean force) throws IOException { - BlockInfo[] fileBlocks = fileINode.getBlocks(); + BlockInfo[] fileBlocks = bc.getBlocks(); for(int idx = 0; idx < fileBlocks.length; idx++) if(fileBlocks[idx] == block) { - return completeBlock(fileINode, idx, force); + return completeBlock(bc, idx, force); } return block; } @@ -529,10 +529,10 @@ public class BlockManager { * regardless of whether enough replicas are present. This is necessary * when tailing edit logs as a Standby. */ - public BlockInfo forceCompleteBlock(final MutableBlockCollection fileINode, + public BlockInfo forceCompleteBlock(final MutableBlockCollection bc, final BlockInfoUnderConstruction block) throws IOException { block.commitBlock(block); - return completeBlock(fileINode, block, true); + return completeBlock(bc, block, true); } @@ -546,14 +546,14 @@ public class BlockManager { * The methods returns null if there is no partial block at the end. * The client is supposed to allocate a new block with the next call. 
* - * @param fileINode file + * @param bc file * @return the last block locations if the block is partial or null otherwise */ public LocatedBlock convertLastBlockToUnderConstruction( - MutableBlockCollection fileINode) throws IOException { - BlockInfo oldBlock = fileINode.getLastBlock(); + MutableBlockCollection bc) throws IOException { + BlockInfo oldBlock = bc.getLastBlock(); if(oldBlock == null || - fileINode.getPreferredBlockSize() == oldBlock.getNumBytes()) + bc.getPreferredBlockSize() == oldBlock.getNumBytes()) return null; assert oldBlock == getStoredBlock(oldBlock) : "last block of the file is not in blocksMap"; @@ -561,7 +561,7 @@ public class BlockManager { DatanodeDescriptor[] targets = getNodes(oldBlock); BlockInfoUnderConstruction ucBlock = - fileINode.setLastBlock(oldBlock, targets); + bc.setLastBlock(oldBlock, targets); blocksMap.replaceBlock(ucBlock); // Remove block from replication queue. @@ -581,7 +581,7 @@ public class BlockManager { // always decrement total blocks -1); - final long fileLength = fileINode.computeContentSummary().getLength(); + final long fileLength = bc.computeContentSummary().getLength(); final long pos = fileLength - ucBlock.getNumBytes(); return createLocatedBlock(ucBlock, pos, AccessMode.WRITE); } @@ -921,8 +921,8 @@ public class BlockManager { " does not exist. "); } - BlockCollection inode = storedBlock.getINode(); - if (inode == null) { + BlockCollection bc = storedBlock.getBlockCollection(); + if (bc == null) { NameNode.stateChangeLog.info("BLOCK markBlockAsCorrupt: " + "block " + storedBlock + " could not be marked as corrupt as it" + @@ -936,7 +936,7 @@ public class BlockManager { // Add this replica to corruptReplicas Map corruptReplicas.addToCorruptReplicasMap(storedBlock, node, reason); - if (countNodes(storedBlock).liveReplicas() >= inode.getReplication()) { + if (countNodes(storedBlock).liveReplicas() >= bc.getReplication()) { // the block is over-replicated so invalidate the replicas immediately invalidateBlock(storedBlock, node); } else if (namesystem.isPopulatingReplQueues()) { @@ -1049,7 +1049,7 @@ public class BlockManager { int requiredReplication, numEffectiveReplicas; List containingNodes, liveReplicaNodes; DatanodeDescriptor srcNode; - BlockCollection fileINode = null; + BlockCollection bc = null; int additionalReplRequired; int scheduledWork = 0; @@ -1061,15 +1061,15 @@ public class BlockManager { for (int priority = 0; priority < blocksToReplicate.size(); priority++) { for (Block block : blocksToReplicate.get(priority)) { // block should belong to a file - fileINode = blocksMap.getINode(block); + bc = blocksMap.getBlockCollection(block); // abandoned block or block reopened for append - if(fileINode == null || fileINode instanceof MutableBlockCollection) { + if(bc == null || bc instanceof MutableBlockCollection) { neededReplications.remove(block, priority); // remove from neededReplications neededReplications.decrementReplicationIndex(priority); continue; } - requiredReplication = fileINode.getReplication(); + requiredReplication = bc.getReplication(); // get a source data-node containingNodes = new ArrayList(); @@ -1105,7 +1105,7 @@ public class BlockManager { } else { additionalReplRequired = 1; // Needed on a new rack } - work.add(new ReplicationWork(block, fileINode, srcNode, + work.add(new ReplicationWork(block, bc, srcNode, containingNodes, liveReplicaNodes, additionalReplRequired, priority)); } @@ -1127,8 +1127,8 @@ public class BlockManager { // choose replication targets: NOT HOLDING THE GLOBAL LOCK // It is costly 
to extract the filename for which chooseTargets is called, - // so for now we pass in the Inode itself. - rw.targets = blockplacement.chooseTarget(rw.fileINode, + // so for now we pass in the block collection itself. + rw.targets = blockplacement.chooseTarget(rw.bc, rw.additionalReplRequired, rw.srcNode, rw.liveReplicaNodes, excludedNodes, rw.block.getNumBytes()); } @@ -1147,15 +1147,15 @@ public class BlockManager { int priority = rw.priority; // Recheck since global lock was released // block should belong to a file - fileINode = blocksMap.getINode(block); + bc = blocksMap.getBlockCollection(block); // abandoned block or block reopened for append - if(fileINode == null || fileINode instanceof MutableBlockCollection) { + if(bc == null || bc instanceof MutableBlockCollection) { neededReplications.remove(block, priority); // remove from neededReplications rw.targets = null; neededReplications.decrementReplicationIndex(priority); continue; } - requiredReplication = fileINode.getReplication(); + requiredReplication = bc.getReplication(); // do not schedule more if enough replicas is already pending NumberReplicas numReplicas = countNodes(block); @@ -1914,7 +1914,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block int numCurrentReplica = countLiveNodes(storedBlock); if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED && numCurrentReplica >= minReplication) { - completeBlock((MutableBlockCollection)storedBlock.getINode(), storedBlock, false); + completeBlock((MutableBlockCollection)storedBlock.getBlockCollection(), storedBlock, false); } else if (storedBlock.isComplete()) { // check whether safe replication is reached for the block // only complete blocks are counted towards that. @@ -1942,7 +1942,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block } else { storedBlock = block; } - if (storedBlock == null || storedBlock.getINode() == null) { + if (storedBlock == null || storedBlock.getBlockCollection() == null) { // If this block does not belong to anyfile, then we are done. 
NameNode.stateChangeLog.info("BLOCK* addStoredBlock: " + block + " on " + node + " size " + block.getNumBytes() @@ -1952,8 +1952,8 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block return block; } assert storedBlock != null : "Block must be stored by now"; - BlockCollection fileINode = storedBlock.getINode(); - assert fileINode != null : "Block must belong to a file"; + BlockCollection bc = storedBlock.getBlockCollection(); + assert bc != null : "Block must belong to a file"; // add block to the datanode boolean added = node.addBlock(storedBlock); @@ -1979,7 +1979,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block if(storedBlock.getBlockUCState() == BlockUCState.COMMITTED && numLiveReplicas >= minReplication) { - storedBlock = completeBlock((MutableBlockCollection)fileINode, storedBlock, false); + storedBlock = completeBlock((MutableBlockCollection)bc, storedBlock, false); } else if (storedBlock.isComplete()) { // check whether safe replication is reached for the block // only complete blocks are counted towards that @@ -1990,7 +1990,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block } // if file is under construction, then done for now - if (fileINode instanceof MutableBlockCollection) { + if (bc instanceof MutableBlockCollection) { return storedBlock; } @@ -2000,7 +2000,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block } // handle underReplication/overReplication - short fileReplication = fileINode.getReplication(); + short fileReplication = bc.getReplication(); if (!isNeededReplication(storedBlock, fileReplication, numCurrentReplica)) { neededReplications.remove(storedBlock, numCurrentReplica, num.decommissionedReplicas(), fileReplication); @@ -2127,8 +2127,8 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block * what happened with it. */ private MisReplicationResult processMisReplicatedBlock(BlockInfo block) { - BlockCollection fileINode = block.getINode(); - if (fileINode == null) { + BlockCollection bc = block.getBlockCollection(); + if (bc == null) { // block does not belong to any file addToInvalidates(block); return MisReplicationResult.INVALID; @@ -2139,7 +2139,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block return MisReplicationResult.UNDER_CONSTRUCTION; } // calculate current replication - short expectedReplication = fileINode.getReplication(); + short expectedReplication = bc.getReplication(); NumberReplicas num = countNodes(block); int numCurrentReplica = num.liveReplicas(); // add to under-replicated queue if need to be @@ -2256,7 +2256,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block BlockPlacementPolicy replicator) { assert namesystem.hasWriteLock(); // first form a rack to datanodes map and - BlockCollection inode = getINode(b); + BlockCollection bc = getBlockCollection(b); final Map> rackMap = new HashMap>(); for(final Iterator iter = nonExcess.iterator(); @@ -2296,7 +2296,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block || (addedNode != null && !priSet.contains(addedNode))) ) { cur = delNodeHint; } else { // regular excessive replica removal - cur = replicator.chooseReplicaToDelete(inode, b, replication, + cur = replicator.chooseReplicaToDelete(bc, b, replication, priSet, remains); } firstOne = false; @@ -2377,8 +2377,8 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block // necessary. In that case, put block on a possibly-will- // be-replicated list. 
// - BlockCollection fileINode = blocksMap.getINode(block); - if (fileINode != null) { + BlockCollection bc = blocksMap.getBlockCollection(block); + if (bc != null) { namesystem.decrementSafeBlockCount(block); updateNeededReplications(block, -1, 0); } @@ -2609,7 +2609,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block NumberReplicas num) { int curReplicas = num.liveReplicas(); int curExpectedReplicas = getReplication(block); - BlockCollection fileINode = blocksMap.getINode(block); + BlockCollection bc = blocksMap.getBlockCollection(block); Iterator nodeIter = blocksMap.nodeIterator(block); StringBuilder nodeList = new StringBuilder(); while (nodeIter.hasNext()) { @@ -2622,7 +2622,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block + ", corrupt replicas: " + num.corruptReplicas() + ", decommissioned replicas: " + num.decommissionedReplicas() + ", excess replicas: " + num.excessReplicas() - + ", Is Open File: " + (fileINode instanceof MutableBlockCollection) + + ", Is Open File: " + (bc instanceof MutableBlockCollection) + ", Datanodes having this block: " + nodeList + ", Current Datanode: " + srcNode + ", Is current datanode decommissioning: " + srcNode.isDecommissionInProgress()); @@ -2637,8 +2637,8 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block final Iterator it = srcNode.getBlockIterator(); while(it.hasNext()) { final Block block = it.next(); - BlockCollection fileINode = blocksMap.getINode(block); - short expectedReplication = fileINode.getReplication(); + BlockCollection bc = blocksMap.getBlockCollection(block); + short expectedReplication = bc.getReplication(); NumberReplicas num = countNodes(block); int numCurrentReplica = num.liveReplicas(); if (numCurrentReplica > expectedReplication) { @@ -2660,9 +2660,9 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block final Iterator it = srcNode.getBlockIterator(); while(it.hasNext()) { final Block block = it.next(); - BlockCollection fileINode = blocksMap.getINode(block); + BlockCollection bc = blocksMap.getBlockCollection(block); - if (fileINode != null) { + if (bc != null) { NumberReplicas num = countNodes(block); int curReplicas = num.liveReplicas(); int curExpectedReplicas = getReplication(block); @@ -2677,7 +2677,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block if ((curReplicas == 0) && (num.decommissionedReplicas() > 0)) { decommissionOnlyReplicas++; } - if (fileINode instanceof MutableBlockCollection) { + if (bc instanceof MutableBlockCollection) { underReplicatedInOpenFiles++; } } @@ -2780,11 +2780,11 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block /* get replication factor of a block */ private int getReplication(Block block) { - BlockCollection fileINode = blocksMap.getINode(block); - if (fileINode == null) { // block does not belong to any file + BlockCollection bc = blocksMap.getBlockCollection(block); + if (bc == null) { // block does not belong to any file return 0; } - return fileINode.getReplication(); + return bc.getReplication(); } @@ -2856,12 +2856,12 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block return this.neededReplications.getCorruptBlockSize(); } - public BlockInfo addINode(BlockInfo block, BlockCollection iNode) { - return blocksMap.addINode(block, iNode); + public BlockInfo addBlockCollection(BlockInfo block, BlockCollection bc) { + return blocksMap.addBlockCollection(block, bc); } - public BlockCollection getINode(Block b) { - return blocksMap.getINode(b); + public BlockCollection getBlockCollection(Block b) { + return 
blocksMap.getBlockCollection(b); } /** @return an iterator of the datanodes. */ @@ -3000,7 +3000,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block private static class ReplicationWork { private Block block; - private BlockCollection fileINode; + private BlockCollection bc; private DatanodeDescriptor srcNode; private List containingNodes; @@ -3011,14 +3011,14 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block private int priority; public ReplicationWork(Block block, - BlockCollection fileINode, + BlockCollection bc, DatanodeDescriptor srcNode, List containingNodes, List liveReplicaNodes, int additionalReplRequired, int priority) { this.block = block; - this.fileINode = fileINode; + this.bc = bc; this.srcNode = srcNode; this.containingNodes = containingNodes; this.liveReplicaNodes = liveReplicaNodes; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java index b07d70ca546..e1efae54193 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java @@ -110,11 +110,11 @@ public abstract class BlockPlacementPolicy { * choose numOfReplicas data nodes for writer * If not, return as many as we can. * The base implemenatation extracts the pathname of the file from the - * specified srcInode, but this could be a costly operation depending on the + * specified srcBC, but this could be a costly operation depending on the * file system implementation. Concrete implementations of this class should * override this method to avoid this overhead. * - * @param srcInode The inode of the file for which chooseTarget is being invoked. + * @param srcBC block collection of file for which chooseTarget is invoked. * @param numOfReplicas additional number of replicas wanted. * @param writer the writer's machine, null if not in the cluster. * @param chosenNodes datanodes that have been chosen as targets. @@ -122,13 +122,13 @@ public abstract class BlockPlacementPolicy { * @return array of DatanodeDescriptor instances chosen as target * and sorted as a pipeline. */ - DatanodeDescriptor[] chooseTarget(BlockCollection srcInode, + DatanodeDescriptor[] chooseTarget(BlockCollection srcBC, int numOfReplicas, DatanodeDescriptor writer, List chosenNodes, HashMap excludedNodes, long blocksize) { - return chooseTarget(srcInode.getName(), numOfReplicas, writer, + return chooseTarget(srcBC.getName(), numOfReplicas, writer, chosenNodes, excludedNodes, blocksize); } @@ -149,7 +149,7 @@ public abstract class BlockPlacementPolicy { * Decide whether deleting the specified replica of the block still makes * the block conform to the configured block placement policy. * - * @param srcInode The inode of the file to which the block-to-be-deleted belongs + * @param srcBC block collection of file to which block-to-be-deleted belongs * @param block The block to be deleted * @param replicationFactor The required number of replicas for this block * @param existingReplicas The replica locations of this block that are present @@ -158,7 +158,7 @@ public abstract class BlockPlacementPolicy { listed in the previous parameter. 
* @return the replica that is the best candidate for deletion */ - abstract public DatanodeDescriptor chooseReplicaToDelete(BlockCollection srcInode, + abstract public DatanodeDescriptor chooseReplicaToDelete(BlockCollection srcBC, Block block, short replicationFactor, Collection existingReplicas, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java index da25213cd98..a1e7a208ec7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java @@ -546,7 +546,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy { } @Override - public DatanodeDescriptor chooseReplicaToDelete(BlockCollection inode, + public DatanodeDescriptor chooseReplicaToDelete(BlockCollection bc, Block block, short replicationFactor, Collection first, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java index 71fc5443c9f..6757ef486b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hdfs.util.LightWeightGSet; /** * This class maintains the map from a block to its metadata. - * block's metadata currently includes INode it belongs to and + * block's metadata currently includes blockCollection it belongs to and * the datanodes that store the block. */ class BlocksMap { @@ -92,21 +92,21 @@ class BlocksMap { blocks = null; } - BlockCollection getINode(Block b) { + BlockCollection getBlockCollection(Block b) { BlockInfo info = blocks.get(b); - return (info != null) ? info.getINode() : null; + return (info != null) ? info.getBlockCollection() : null; } /** - * Add block b belonging to the specified file inode to the map. + * Add block b belonging to the specified block collection to the map. 
*/ - BlockInfo addINode(BlockInfo b, BlockCollection iNode) { + BlockInfo addBlockCollection(BlockInfo b, BlockCollection bc) { BlockInfo info = blocks.get(b); if (info != b) { info = b; blocks.put(info); } - info.setINode(iNode); + info.setBlockCollection(bc); return info; } @@ -120,7 +120,7 @@ class BlocksMap { if (blockInfo == null) return; - blockInfo.setINode(null); + blockInfo.setBlockCollection(null); for(int idx = blockInfo.numNodes()-1; idx >= 0; idx--) { DatanodeDescriptor dn = blockInfo.getDatanode(idx); dn.removeBlock(blockInfo); // remove from the list and wipe the location @@ -168,7 +168,7 @@ class BlocksMap { boolean removed = node.removeBlock(info); if (info.getDatanode(0) == null // no datanodes left - && info.getINode() == null) { // does not belong to a file + && info.getBlockCollection() == null) { // does not belong to a file blocks.remove(b); // remove block from the map } return removed; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index ab0f4c4dddd..d4239288a4d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -309,7 +309,7 @@ public class FSDirectory implements Closeable { INodeFile newF = (INodeFile)newNode; BlockInfo[] blocks = newF.getBlocks(); for (int i = 0; i < blocks.length; i++) { - newF.setBlock(i, getBlockManager().addINode(blocks[i], newF)); + newF.setBlock(i, getBlockManager().addBlockCollection(blocks[i], newF)); } } } finally { @@ -346,7 +346,7 @@ public class FSDirectory implements Closeable { fileINode.getReplication(), BlockUCState.UNDER_CONSTRUCTION, targets); - getBlockManager().addINode(blockInfo, fileINode); + getBlockManager().addBlockCollection(blockInfo, fileINode); fileINode.addBlock(blockInfo); if(NameNode.stateChangeLog.isDebugEnabled()) { @@ -1127,7 +1127,7 @@ public class FSDirectory implements Closeable { int index = 0; for (BlockInfo b : newnode.getBlocks()) { - BlockInfo info = getBlockManager().addINode(b, newnode); + BlockInfo info = getBlockManager().addBlockCollection(b, newnode); newnode.setBlock(index, info); // inode refers to the block in BlocksMap index++; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index 8f2b107e798..76c661d8297 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -601,7 +601,7 @@ public class FSEditLogLoader { // OP_ADD operations as each block is allocated. 
newBI = new BlockInfo(newBlock, file.getReplication()); } - fsNamesys.getBlockManager().addINode(newBI, file); + fsNamesys.getBlockManager().addBlockCollection(newBI, file); file.addBlock(newBI); fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 62bc1c3ba7e..55195e9e9b2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -2840,7 +2840,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, if (storedBlock == null) { throw new IOException("Block (=" + lastblock + ") not found"); } - INodeFile iFile = (INodeFile) storedBlock.getINode(); + INodeFile iFile = (INodeFile) storedBlock.getBlockCollection(); if (!iFile.isUnderConstruction() || storedBlock.isComplete()) { throw new IOException("Unexpected block (=" + lastblock + ") since the file (=" + iFile.getLocalName() @@ -4135,7 +4135,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, * Returns whether the given block is one pointed-to by a file. */ private boolean isValidBlock(Block b) { - return (blockManager.getINode(b) != null); + return (blockManager.getBlockCollection(b) != null); } // Distributed upgrade manager @@ -4394,7 +4394,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, } // check file inode - INodeFile file = (INodeFile) storedBlock.getINode(); + INodeFile file = (INodeFile) storedBlock.getBlockCollection(); if (file==null || !file.isUnderConstruction()) { throw new IOException("The file " + storedBlock + " belonged to does not exist or it is not under construction."); @@ -4706,7 +4706,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, while (blkIterator.hasNext()) { Block blk = blkIterator.next(); - INode inode = (INodeFile) blockManager.getINode(blk); + INode inode = (INodeFile) blockManager.getBlockCollection(blk); skip++; if (inode != null && blockManager.countNodes(blk).liveReplicas() == 0) { String src = FSDirectory.getFullPathName(inode); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index 55cb68835bf..b3485ecc8ee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -131,7 +131,7 @@ public class INodeFile extends INode implements BlockCollection { } for(BlockInfo bi: newlist) { - bi.setINode(this); + bi.setBlockCollection(this); } this.blocks = newlist; } @@ -164,7 +164,7 @@ public class INodeFile extends INode implements BlockCollection { if(blocks != null && v != null) { for (BlockInfo blk : blocks) { v.add(blk); - blk.setINode(null); + blk.setBlockCollection(null); } } blocks = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java index 03b0fbd2169..66e33e077d2 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java @@ -156,7 +156,7 @@ public class INodeFileUnderConstruction extends INodeFile BlockInfoUnderConstruction ucBlock = lastBlock.convertToBlockUnderConstruction( BlockUCState.UNDER_CONSTRUCTION, targets); - ucBlock.setINode(this); + ucBlock.setBlockCollection(this); setBlock(numBlocks()-1, ucBlock); return ucBlock; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java index b4e48227cf6..f284aaa6702 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java @@ -734,7 +734,7 @@ class NamenodeJspHelper { this.inode = null; } else { this.block = new Block(blockId); - this.inode = (INodeFile) blockManager.getINode(block); + this.inode = (INodeFile) blockManager.getBlockCollection(block); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index f13b2779fad..4d8a371747f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -384,7 +384,7 @@ public class TestBlockManager { Mockito.doReturn((short)3).when(iNode).getReplication(); BlockInfo blockInfo = blockOnNodes(blockId, nodes); - bm.blocksMap.addINode(blockInfo, iNode); + bm.blocksMap.addBlockCollection(blockInfo, iNode); return blockInfo; } diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRaid.java b/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRaid.java index 77977118f2a..5099eea9177 100644 --- a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRaid.java +++ b/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRaid.java @@ -144,25 +144,25 @@ public class BlockPlacementPolicyRaid extends BlockPlacementPolicy { /** {@inheritDoc} */ @Override - public DatanodeDescriptor chooseReplicaToDelete(BlockCollection inode, + public DatanodeDescriptor chooseReplicaToDelete(BlockCollection bc, Block block, short replicationFactor, Collection first, Collection second) { DatanodeDescriptor chosenNode = null; try { - String path = cachedFullPathNames.get(inode); + String path = cachedFullPathNames.get(bc); FileType type = getFileType(path); if (type == FileType.NOT_RAID) { return defaultPolicy.chooseReplicaToDelete( - inode, block, replicationFactor, first, second); + bc, block, replicationFactor, first, second); } List companionBlocks = getCompanionBlocks(path, type, block); if (companionBlocks == null || companionBlocks.size() == 0) { // Use the default method if it is not a valid raided or parity file return 
defaultPolicy.chooseReplicaToDelete( - inode, block, replicationFactor, first, second); + bc, block, replicationFactor, first, second); } // Delete from the first collection first // This ensures the number of unique rack of this block is not reduced @@ -174,12 +174,12 @@ public class BlockPlacementPolicyRaid extends BlockPlacementPolicy { return chosenNode; } return defaultPolicy.chooseReplicaToDelete( - inode, block, replicationFactor, first, second); + bc, block, replicationFactor, first, second); } catch (Exception e) { LOG.debug("Error happend when choosing replica to delete" + StringUtils.stringifyException(e)); return defaultPolicy.chooseReplicaToDelete( - inode, block, replicationFactor, first, second); + bc, block, replicationFactor, first, second); } } @@ -446,25 +446,25 @@ public class BlockPlacementPolicyRaid extends BlockPlacementPolicy { }; static private class INodeWithHashCode { - BlockCollection inode; - INodeWithHashCode(BlockCollection inode) { - this.inode = inode; + BlockCollection bc; + INodeWithHashCode(BlockCollection bc) { + this.bc= bc; } @Override public boolean equals(Object obj) { - return inode == obj; + return bc== obj; } @Override public int hashCode() { - return System.identityHashCode(inode); + return System.identityHashCode(bc); } String getFullPathName() { - return inode.getName(); + return bc.getName(); } } - public String get(BlockCollection inode) throws IOException { - return cacheInternal.get(new INodeWithHashCode(inode)); + public String get(BlockCollection bc) throws IOException { + return cacheInternal.get(new INodeWithHashCode(bc)); } } diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementPolicyRaid.java b/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementPolicyRaid.java index 19d81434647..84cc534d215 100644 --- a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementPolicyRaid.java +++ b/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementPolicyRaid.java @@ -240,19 +240,19 @@ public class TestBlockPlacementPolicyRaid { // test full path cache CachedFullPathNames cachedFullPathNames = new CachedFullPathNames(namesystem); - final BlockCollection[] inodes = NameNodeRaidTestUtil.getBlockCollections( + final BlockCollection[] bcs = NameNodeRaidTestUtil.getBlockCollections( namesystem, file1, file2); - verifyCachedFullPathNameResult(cachedFullPathNames, inodes[0]); - verifyCachedFullPathNameResult(cachedFullPathNames, inodes[0]); - verifyCachedFullPathNameResult(cachedFullPathNames, inodes[1]); - verifyCachedFullPathNameResult(cachedFullPathNames, inodes[1]); + verifyCachedFullPathNameResult(cachedFullPathNames, bcs[0]); + verifyCachedFullPathNameResult(cachedFullPathNames, bcs[0]); + verifyCachedFullPathNameResult(cachedFullPathNames, bcs[1]); + verifyCachedFullPathNameResult(cachedFullPathNames, bcs[1]); try { Thread.sleep(1200L); } catch (InterruptedException e) { } - verifyCachedFullPathNameResult(cachedFullPathNames, inodes[1]); - verifyCachedFullPathNameResult(cachedFullPathNames, inodes[0]); + verifyCachedFullPathNameResult(cachedFullPathNames, bcs[1]); + verifyCachedFullPathNameResult(cachedFullPathNames, bcs[0]); } finally { if (cluster != null) { cluster.shutdown(); @@ -476,14 +476,14 @@ public class TestBlockPlacementPolicyRaid { } private void verifyCachedFullPathNameResult( - 
CachedFullPathNames cachedFullPathNames, BlockCollection inode) + CachedFullPathNames cachedFullPathNames, BlockCollection bc) throws IOException { - String res1 = inode.getName(); - String res2 = cachedFullPathNames.get(inode); + String res1 = bc.getName(); + String res2 = cachedFullPathNames.get(bc); LOG.info("Actual path name: " + res1); LOG.info("Cached path name: " + res2); - Assert.assertEquals(cachedFullPathNames.get(inode), - inode.getName()); + Assert.assertEquals(cachedFullPathNames.get(bc), + bc.getName()); } private void verifyCachedBlocksResult(CachedLocatedBlocks cachedBlocks, @@ -502,7 +502,7 @@ public class TestBlockPlacementPolicyRaid { private Collection getCompanionBlocks( FSNamesystem namesystem, BlockPlacementPolicyRaid policy, ExtendedBlock block) throws IOException { - INodeFile inode = (INodeFile)blockManager.blocksMap.getINode(block + INodeFile inode = (INodeFile)blockManager.blocksMap.getBlockCollection(block .getLocalBlock()); FileType type = policy.getFileType(inode.getFullPathName()); return policy.getCompanionBlocks(inode.getFullPathName(), type, From 98b00d7cc015555642068827e6c52eaed0740c94 Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Thu, 10 May 2012 23:13:02 +0000 Subject: [PATCH 53/70] HDFS-3134. harden edit log loader against malformed or malicious input. Contributed by Colin Patrick McCabe git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1336943 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../token/block/BlockTokenIdentifier.java | 3 +- .../hdfs/server/namenode/FSEditLogOp.java | 31 +++++++- .../hdfs/server/namenode/TestEditLog.java | 72 +++++++++++++++++++ 4 files changed, 106 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 7b25112e2c0..a14099e39a2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -444,6 +444,9 @@ Release 2.0.0 - UNRELEASED HDFS-3369. Rename {get|set|add}INode(..) methods in BlockManager and BlocksMap to {get|set|add}BlockCollection(..). (John George via szetszwo) + HDFS-3134. harden edit log loader against malformed or malicious input. + (Colin Patrick McCabe via eli) + OPTIMIZATIONS HDFS-3024. 
Improve performance of stringification in addStoredBlock (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java index c1fd3f9f826..62f2d762379 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java @@ -148,7 +148,8 @@ public class BlockTokenIdentifier extends TokenIdentifier { userId = WritableUtils.readString(in); blockPoolId = WritableUtils.readString(in); blockId = WritableUtils.readVLong(in); - int length = WritableUtils.readVInt(in); + int length = WritableUtils.readVIntInRange(in, 0, + AccessMode.class.getEnumConstants().length); for (int i = 0; i < length; i++) { modes.add(WritableUtils.readEnum(in, AccessMode.class)); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java index 56a610f101e..9f7742cc674 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java @@ -203,6 +203,10 @@ public abstract class FSEditLogOp { } T setBlocks(Block[] blocks) { + if (blocks.length > MAX_BLOCKS) { + throw new RuntimeException("Can't have more than " + MAX_BLOCKS + + " in an AddCloseOp."); + } this.blocks = blocks; return (T)this; } @@ -296,10 +300,18 @@ public abstract class FSEditLogOp { } } + static final public int MAX_BLOCKS = 1024 * 1024 * 64; + private static Block[] readBlocks( DataInputStream in, int logVersion) throws IOException { int numBlocks = in.readInt(); + if (numBlocks < 0) { + throw new IOException("invalid negative number of blocks"); + } else if (numBlocks > MAX_BLOCKS) { + throw new IOException("invalid number of blocks: " + numBlocks + + ". The maximum number of blocks per file is " + MAX_BLOCKS); + } Block[] blocks = new Block[numBlocks]; for (int i = 0; i < numBlocks; i++) { Block blk = new Block(); @@ -579,6 +591,7 @@ public abstract class FSEditLogOp { String trg; String[] srcs; long timestamp; + final static public int MAX_CONCAT_SRC = 1024 * 1024; private ConcatDeleteOp() { super(OP_CONCAT_DELETE); @@ -594,7 +607,12 @@ public abstract class FSEditLogOp { } ConcatDeleteOp setSources(String[] srcs) { + if (srcs.length > MAX_CONCAT_SRC) { + throw new RuntimeException("ConcatDeleteOp can only have " + + MAX_CONCAT_SRC + " sources at most."); + } this.srcs = srcs; + return this; } @@ -624,8 +642,8 @@ public abstract class FSEditLogOp { if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) { this.length = in.readInt(); if (length < 3) { // trg, srcs.., timestamp - throw new IOException("Incorrect data format. " - + "Concat delete operation."); + throw new IOException("Incorrect data format " + + "for ConcatDeleteOp."); } } this.trg = FSImageSerialization.readString(in); @@ -635,6 +653,15 @@ public abstract class FSEditLogOp { } else { srcSize = this.length - 1 - 1; // trg and timestamp } + if (srcSize < 0) { + throw new IOException("Incorrect data format. 
" + + "ConcatDeleteOp cannot have a negative number of data " + + " sources."); + } else if (srcSize > MAX_CONCAT_SRC) { + throw new IOException("Incorrect data format. " + + "ConcatDeleteOp can have at most " + MAX_CONCAT_SRC + + " sources, but we tried to have " + (length - 3) + " sources."); + } this.srcs = new String [srcSize]; for(int i=0; i Date: Thu, 10 May 2012 23:15:53 +0000 Subject: [PATCH 54/70] HADOOP-8361. Avoid out-of-memory problems when deserializing strings. Contributed by Colin Patrick McCabe git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1336945 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 ++ .../java/org/apache/hadoop/fs/FileStatus.java | 16 ++++----- .../fs/permission/PermissionStatus.java | 8 ++--- .../main/java/org/apache/hadoop/io/Text.java | 32 ++++++++++++++++- .../java/org/apache/hadoop/io/TestText.java | 36 ++++++++++++++++++- 5 files changed, 81 insertions(+), 14 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index d849db5d0d6..91e09d181ab 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -305,6 +305,9 @@ Release 2.0.0 - UNRELEASED HADOOP-8340. SNAPSHOT build versions should compare as less than their eventual final release. (todd) + HADOOP-8361. Avoid out-of-memory problems when deserializing strings. + (Colin Patrick McCabe via eli) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java index f4492e2f235..4cc2c182d53 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java @@ -254,7 +254,7 @@ public class FileStatus implements Writable, Comparable { // Writable ////////////////////////////////////////////////// public void write(DataOutput out) throws IOException { - Text.writeString(out, getPath().toString()); + Text.writeString(out, getPath().toString(), Text.ONE_MEGABYTE); out.writeLong(getLen()); out.writeBoolean(isDirectory()); out.writeShort(getReplication()); @@ -262,16 +262,16 @@ public class FileStatus implements Writable, Comparable { out.writeLong(getModificationTime()); out.writeLong(getAccessTime()); getPermission().write(out); - Text.writeString(out, getOwner()); - Text.writeString(out, getGroup()); + Text.writeString(out, getOwner(), Text.ONE_MEGABYTE); + Text.writeString(out, getGroup(), Text.ONE_MEGABYTE); out.writeBoolean(isSymlink()); if (isSymlink()) { - Text.writeString(out, getSymlink().toString()); + Text.writeString(out, getSymlink().toString(), Text.ONE_MEGABYTE); } } public void readFields(DataInput in) throws IOException { - String strPath = Text.readString(in); + String strPath = Text.readString(in, Text.ONE_MEGABYTE); this.path = new Path(strPath); this.length = in.readLong(); this.isdir = in.readBoolean(); @@ -280,10 +280,10 @@ public class FileStatus implements Writable, Comparable { modification_time = in.readLong(); access_time = in.readLong(); permission.readFields(in); - owner = Text.readString(in); - group = Text.readString(in); + owner = Text.readString(in, Text.ONE_MEGABYTE); + group = Text.readString(in, Text.ONE_MEGABYTE); if (in.readBoolean()) { - this.symlink = new Path(Text.readString(in)); + this.symlink = new Path(Text.readString(in, 
Text.ONE_MEGABYTE)); } else { this.symlink = null; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java index a26d2f422a9..5642d0f5b92 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java @@ -84,8 +84,8 @@ public class PermissionStatus implements Writable { /** {@inheritDoc} */ public void readFields(DataInput in) throws IOException { - username = Text.readString(in); - groupname = Text.readString(in); + username = Text.readString(in, Text.ONE_MEGABYTE); + groupname = Text.readString(in, Text.ONE_MEGABYTE); permission = FsPermission.read(in); } @@ -110,8 +110,8 @@ public class PermissionStatus implements Writable { String username, String groupname, FsPermission permission) throws IOException { - Text.writeString(out, username); - Text.writeString(out, groupname); + Text.writeString(out, username, Text.ONE_MEGABYTE); + Text.writeString(out, groupname, Text.ONE_MEGABYTE); permission.write(out); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java index e38dd3c79a5..cae70d74b43 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java @@ -53,6 +53,8 @@ import org.apache.hadoop.classification.InterfaceStability; public class Text extends BinaryComparable implements WritableComparable { + static final int SHORT_STRING_MAX = 1024 * 1024; + private static ThreadLocal ENCODER_FACTORY = new ThreadLocal() { protected CharsetEncoder initialValue() { @@ -412,6 +414,8 @@ public class Text extends BinaryComparable return bytes; } + static final public int ONE_MEGABYTE = 1024 * 1024; + /** Read a UTF8 encoded string from in */ public static String readString(DataInput in) throws IOException { @@ -420,7 +424,17 @@ public class Text extends BinaryComparable in.readFully(bytes, 0, length); return decode(bytes); } - + + /** Read a UTF8 encoded string with a maximum size + */ + public static String readString(DataInput in, int maxLength) + throws IOException { + int length = WritableUtils.readVIntInRange(in, 0, maxLength - 1); + byte [] bytes = new byte[length]; + in.readFully(bytes, 0, length); + return decode(bytes); + } + /** Write a UTF8 encoded string to out */ public static int writeString(DataOutput out, String s) throws IOException { @@ -431,6 +445,22 @@ public class Text extends BinaryComparable return length; } + /** Write a UTF8 encoded string with a maximum size to out + */ + public static int writeString(DataOutput out, String s, int maxLength) + throws IOException { + ByteBuffer bytes = encode(s); + int length = bytes.limit(); + if (length >= maxLength) { + throw new IOException("string was too long to write! 
Expected " + + "less than " + maxLength + " bytes, but got " + + length + " bytes."); + } + WritableUtils.writeVInt(out, length); + out.write(bytes.array(), 0, length); + return length; + } + ////// states for validateUTF8 private static final int LEAD_BYTE = 0; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java index a7718bfba70..a86c532badc 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java @@ -20,6 +20,7 @@ package org.apache.hadoop.io; import junit.framework.TestCase; +import java.io.IOException; import java.nio.ByteBuffer; import java.nio.charset.CharacterCodingException; import java.util.Random; @@ -107,7 +108,6 @@ public class TestText extends TestCase { } } - public void testIO() throws Exception { DataOutputBuffer out = new DataOutputBuffer(); DataInputBuffer in = new DataInputBuffer(); @@ -136,6 +136,40 @@ public class TestText extends TestCase { assertTrue(before.equals(after2)); } } + + public void doTestLimitedIO(String str, int strLen) throws IOException { + DataOutputBuffer out = new DataOutputBuffer(); + DataInputBuffer in = new DataInputBuffer(); + + out.reset(); + try { + Text.writeString(out, str, strLen); + fail("expected writeString to fail when told to write a string " + + "that was too long! The string was '" + str + "'"); + } catch (IOException e) { + } + Text.writeString(out, str, strLen + 1); + + // test that it reads correctly + in.reset(out.getData(), out.getLength()); + in.mark(strLen); + String after; + try { + after = Text.readString(in, strLen); + fail("expected readString to fail when told to read a string " + + "that was too long! The string was '" + str + "'"); + } catch (IOException e) { + } + in.reset(); + after = Text.readString(in, strLen + 1); + assertTrue(str.equals(after)); + } + + public void testLimitedIO() throws Exception { + doTestLimitedIO("abcd", 4); + doTestLimitedIO("", 0); + doTestLimitedIO("1", 1); + } public void testCompare() throws Exception { DataOutputBuffer out1 = new DataOutputBuffer(); From 1a4539abf7603dfaf24ffe764c51ae901ce75c68 Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Thu, 10 May 2012 23:17:29 +0000 Subject: [PATCH 55/70] Remove SHORT_STRING_MAX, left out of the previous commit. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1336946 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/src/main/java/org/apache/hadoop/io/Text.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java index cae70d74b43..0bee33236d6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java @@ -53,8 +53,6 @@ import org.apache.hadoop.classification.InterfaceStability; public class Text extends BinaryComparable implements WritableComparable { - static final int SHORT_STRING_MAX = 1024 * 1024; - private static ThreadLocal ENCODER_FACTORY = new ThreadLocal() { protected CharsetEncoder initialValue() { From dfd733401d8d5e8f7eb6edd35fd4702316eb40ea Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Thu, 10 May 2012 23:48:12 +0000 Subject: [PATCH 56/70] HADOOP-8388. 
Remove unused BlockLocation serialization. Contributed by Colin Patrick McCabe git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1336966 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 + .../org/apache/hadoop/fs/BlockLocation.java | 69 +--------------- .../apache/hadoop/fs/TestBlockLocation.java | 78 ------------------- 3 files changed, 5 insertions(+), 145 deletions(-) delete mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestBlockLocation.java diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 91e09d181ab..d293fe4940b 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -145,6 +145,9 @@ Release 2.0.0 - UNRELEASED HADOOP-7920. Remove Avro Rpc. (suresh) + HADOOP-8388. Remove unused BlockLocation serialization. + (Colin Patrick McCabe via eli) + NEW FEATURES HADOOP-7773. Add support for protocol buffer based RPC engine. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java index 7a107cf0564..46989f2204c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java @@ -35,16 +35,7 @@ import org.apache.hadoop.io.WritableFactory; */ @InterfaceAudience.Public @InterfaceStability.Stable -public class BlockLocation implements Writable { - - static { // register a ctor - WritableFactories.setFactory - (BlockLocation.class, - new WritableFactory() { - public Writable newInstance() { return new BlockLocation(); } - }); - } - +public class BlockLocation { private String[] hosts; //hostnames of datanodes private String[] names; //hostname:portNumber of datanodes private String[] topologyPaths; // full path name in network topology @@ -219,62 +210,6 @@ public class BlockLocation implements Writable { } } - /** - * Implement write of Writable - */ - public void write(DataOutput out) throws IOException { - out.writeLong(offset); - out.writeLong(length); - out.writeBoolean(corrupt); - out.writeInt(names.length); - for (int i=0; i < names.length; i++) { - Text name = new Text(names[i]); - name.write(out); - } - out.writeInt(hosts.length); - for (int i=0; i < hosts.length; i++) { - Text host = new Text(hosts[i]); - host.write(out); - } - out.writeInt(topologyPaths.length); - for (int i=0; i < topologyPaths.length; i++) { - Text host = new Text(topologyPaths[i]); - host.write(out); - } - } - - /** - * Implement readFields of Writable - */ - public void readFields(DataInput in) throws IOException { - this.offset = in.readLong(); - this.length = in.readLong(); - this.corrupt = in.readBoolean(); - int numNames = in.readInt(); - this.names = new String[numNames]; - for (int i = 0; i < numNames; i++) { - Text name = new Text(); - name.readFields(in); - names[i] = name.toString(); - } - - int numHosts = in.readInt(); - this.hosts = new String[numHosts]; - for (int i = 0; i < numHosts; i++) { - Text host = new Text(); - host.readFields(in); - hosts[i] = host.toString(); - } - - int numTops = in.readInt(); - topologyPaths = new String[numTops]; - for (int i = 0; i < numTops; i++) { - Text path = new Text(); - path.readFields(in); - topologyPaths[i] = path.toString(); - } - } - public String toString() { StringBuilder result = new 
StringBuilder(); result.append(offset); @@ -289,4 +224,4 @@ public class BlockLocation implements Writable { } return result.toString(); } -} +} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestBlockLocation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestBlockLocation.java deleted file mode 100644 index fdc877c210b..00000000000 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestBlockLocation.java +++ /dev/null @@ -1,78 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs; - -import java.io.ByteArrayInputStream; -import java.io.DataInput; -import java.io.DataInputStream; -import java.io.IOException; - -import junit.framework.TestCase; - -import org.apache.hadoop.io.DataOutputBuffer; - -public class TestBlockLocation extends TestCase { - // Verify fix of bug identified in HADOOP-6004 - public void testDeserialization() throws IOException { - // Create a test BlockLocation - String[] names = {"one", "two" }; - String[] hosts = {"three", "four" }; - String[] topologyPaths = {"five", "six"}; - long offset = 25l; - long length = 55l; - - BlockLocation bl = new BlockLocation(names, hosts, topologyPaths, - offset, length); - - DataOutputBuffer dob = new DataOutputBuffer(); - - // Serialize it - try { - bl.write(dob); - } catch (IOException e) { - fail("Unable to serialize data: " + e.getMessage()); - } - - byte[] bytes = dob.getData(); - DataInput da = new DataInputStream(new ByteArrayInputStream(bytes)); - - // Try to re-create the BlockLocation the same way as is done during - // deserialization - BlockLocation bl2 = new BlockLocation(); - - try { - bl2.readFields(da); - } catch (IOException e) { - fail("Unable to deserialize BlockLocation: " + e.getMessage()); - } - - // Check that we got back what we started with - verifyDeserialization(bl2.getHosts(), hosts); - verifyDeserialization(bl2.getNames(), names); - verifyDeserialization(bl2.getTopologyPaths(), topologyPaths); - assertEquals(bl2.getOffset(), offset); - assertEquals(bl2.getLength(), length); - } - - private void verifyDeserialization(String[] ar1, String[] ar2) { - assertEquals(ar1.length, ar2.length); - - for(int i = 0; i < ar1.length; i++) - assertEquals(ar1[i], ar2[i]); - } -} From fcb4b1995b53f6b56777246f83d92496b3778795 Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Fri, 11 May 2012 00:03:37 +0000 Subject: [PATCH 57/70] HDFS-3401. Cleanup DatanodeDescriptor creation in the tests. 
Contributed by Eli Collins git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1336972 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 ++- .../org/apache/hadoop/hdfs/DFSTestUtil.java | 41 ++++++++++++++++++- .../hadoop/hdfs/TestDFSClientRetries.java | 2 +- .../org/apache/hadoop/hdfs/TestGetBlocks.java | 3 +- .../hadoop/hdfs/protocolPB/TestPBHelper.java | 15 +++---- .../blockmanagement/TestBlockManager.java | 29 ++++++------- .../blockmanagement/TestHost2NodesMap.java | 14 +++---- .../TestReplicationPolicy.java | 17 ++++---- .../server/datanode/TestBPOfferService.java | 2 +- .../server/datanode/TestBlockRecovery.java | 8 ++-- .../impl/TestInterDatanodeProtocol.java | 3 +- .../hadoop/net/TestNetworkTopology.java | 23 ++++++----- 12 files changed, 99 insertions(+), 64 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index a14099e39a2..ef3932f8b30 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -447,6 +447,10 @@ Release 2.0.0 - UNRELEASED HDFS-3134. harden edit log loader against malformed or malicious input. (Colin Patrick McCabe via eli) + HDFS-3230. Cleanup DatanodeID creation in the tests. (eli) + + HDFS-3401. Cleanup DatanodeDescriptor creation in the tests. (eli) + OPTIMIZATIONS HDFS-3024. Improve performance of stringification in addStoredBlock (todd) @@ -640,8 +644,6 @@ Release 2.0.0 - UNRELEASED HDFS-3395. NN doesn't start with HA+security enabled and HTTP address set to 0.0.0.0. (atm) - HDFS-3230. Cleanup DatanodeID creation in the tests. (eli) - BREAKDOWN OF HDFS-1623 SUBTASKS HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index eded4667dca..5b50cefe102 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -59,6 +59,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; @@ -706,12 +707,19 @@ public class DFSTestUtil { .join(nameservices)); } + private static DatanodeID getDatanodeID(String ipAddr) { + return new DatanodeID(ipAddr, "localhost", + DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT); + } + public static DatanodeID getLocalDatanodeID() { - return new DatanodeID("127.0.0.1", "localhost", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT); + return new DatanodeID("127.0.0.1", "localhost", + DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT); } public static DatanodeID getLocalDatanodeID(int port) { - return new DatanodeID("127.0.0.1", "localhost", "", port, port, port); + return new DatanodeID("127.0.0.1", "localhost", "", + port, port, port); } public static DatanodeDescriptor getLocalDatanodeDescriptor() { @@ -722,7 +730,36 @@ public class DFSTestUtil { return new DatanodeInfo(getLocalDatanodeID()); } + public static DatanodeInfo getDatanodeInfo(String ipAddr) { + return new 
DatanodeInfo(getDatanodeID(ipAddr)); + } + public static DatanodeInfo getLocalDatanodeInfo(int port) { return new DatanodeInfo(getLocalDatanodeID(port)); } + + public static DatanodeInfo getDatanodeInfo(String ipAddr, + String host, int port) { + return new DatanodeInfo(new DatanodeID(ipAddr, host, port)); + } + + public static DatanodeInfo getLocalDatanodeInfo(String ipAddr, + String hostname, AdminStates adminState) { + return new DatanodeInfo(ipAddr, hostname, "storage", + DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT, + DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT, + DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT, + 1, 2, 3, 4, 5, 6, "local", adminState); + } + + public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr, + String rackLocation) { + return getDatanodeDescriptor(ipAddr, DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT, + rackLocation); + } + + public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr, + int port, String rackLocation) { + return new DatanodeDescriptor(new DatanodeID(ipAddr, port), rackLocation); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java index b675f09049b..ad0f74e83ce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java @@ -333,7 +333,7 @@ public class TestDFSClientRetries extends TestCase { LocatedBlock badLocatedBlock = new LocatedBlock( goodLocatedBlock.getBlock(), new DatanodeInfo[] { - new DatanodeInfo(new DatanodeID("255.255.255.255", 234)) + DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234) }, goodLocatedBlock.getStartOffset(), false); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java index 72c27bc9433..7370f72126c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java @@ -121,8 +121,7 @@ public class TestGetBlocks extends TestCase { getBlocksWithException(namenode, dataNodes[0], -1); // get blocks of size BlockSize from a non-existent datanode - DatanodeInfo info = DFSTestUtil.getLocalDatanodeInfo(); - info.setIpAddr("1.2.3.4"); + DatanodeInfo info = DFSTestUtil.getDatanodeInfo("1.2.3.4"); getBlocksWithException(namenode, info, 2); } finally { cluster.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java index 6a8d687a3aa..217960bca8d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java @@ -400,16 +400,11 @@ public class TestPBHelper { @Test public void testConvertLocatedBlock() { - DatanodeInfo [] dnInfos = new DatanodeInfo[3]; - dnInfos[0] = new DatanodeInfo("127.0.0.1", "host1", "0", - 5000, 5001, 5002, 20000, 10001, 9999, - 59, 69, 32, "local", AdminStates.DECOMMISSION_INPROGRESS); - dnInfos[1] = new DatanodeInfo("127.0.0.1", "host2", "1", - 5000, 5001, 5002, 20000, 10001, 9999, - 59, 69, 32, "local", 
AdminStates.DECOMMISSIONED); - dnInfos[2] = new DatanodeInfo("127.0.0.1", "host3", "2", - 5000, 5001, 5002, 20000, 10001, 9999, - 59, 69, 32, "local", AdminStates.NORMAL); + DatanodeInfo [] dnInfos = { + DFSTestUtil.getLocalDatanodeInfo("1.1.1.1", "h1", AdminStates.DECOMMISSION_INPROGRESS), + DFSTestUtil.getLocalDatanodeInfo("2.2.2.2", "h2", AdminStates.DECOMMISSIONED), + DFSTestUtil.getLocalDatanodeInfo("3.3.3.3", "h3", AdminStates.NORMAL) + }; LocatedBlock lb = new LocatedBlock( new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false); LocatedBlockProto lbProto = PBHelper.convert(lb); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index 4d8a371747f..743fb3b08df 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -26,6 +26,7 @@ import java.util.Map.Entry; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeID; @@ -76,12 +77,12 @@ public class TestBlockManager { Mockito.doReturn(true).when(fsn).hasWriteLock(); bm = new BlockManager(fsn, fsn, conf); nodes = ImmutableList.of( - new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/rackA"), - new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/rackA"), - new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/rackA"), - new DatanodeDescriptor(new DatanodeID("4.4.4.4", 5020), "/rackB"), - new DatanodeDescriptor(new DatanodeID("5.5.5.5", 5020), "/rackB"), - new DatanodeDescriptor(new DatanodeID("6.6.6.6", 5020), "/rackB") + DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA"), + DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/rackA"), + DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/rackA"), + DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/rackB"), + DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/rackB"), + DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/rackB") ); rackA = nodes.subList(0, 3); rackB = nodes.subList(3, 6); @@ -277,7 +278,7 @@ public class TestBlockManager { // the block is still under-replicated. Add a new node. This should allow // the third off-rack replica. 
DatanodeDescriptor rackCNode = - new DatanodeDescriptor(new DatanodeID("7.7.7.7", 100), "/rackC"); + DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/rackC"); addNodes(ImmutableList.of(rackCNode)); try { DatanodeDescriptor[] pipeline2 = scheduleSingleReplication(blockInfo); @@ -317,13 +318,13 @@ public class TestBlockManager { @Test public void testBlocksAreNotUnderreplicatedInSingleRack() throws Exception { - List nodes = ImmutableList.of( - new DatanodeDescriptor(new DatanodeID("h1", 5020), "/rackA"), - new DatanodeDescriptor(new DatanodeID("h2", 5020), "/rackA"), - new DatanodeDescriptor(new DatanodeID("h3", 5020), "/rackA"), - new DatanodeDescriptor(new DatanodeID("h4", 5020), "/rackA"), - new DatanodeDescriptor(new DatanodeID("h5", 5020), "/rackA"), - new DatanodeDescriptor(new DatanodeID("h6", 5020), "/rackA") + List nodes = ImmutableList.of( + DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA"), + DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/rackA"), + DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/rackA"), + DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/rackA"), + DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/rackA"), + DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/rackA") ); addNodes(nodes); List origNodes = nodes.subList(0, 3);; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java index 44fa7c24a5a..081438075c1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement; -import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.DFSTestUtil; import org.junit.Before; import org.junit.Test; @@ -32,10 +32,10 @@ public class TestHost2NodesMap { @Before public void setup() { dataNodes = new DatanodeDescriptor[] { - new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/d1/r1"), - new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/d1/r1"), - new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/d1/r2"), - new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5030), "/d1/r2"), + DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"), + DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"), + DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r2"), + DFSTestUtil.getDatanodeDescriptor("3.3.3.3", 5021, "/d1/r2"), }; for (DatanodeDescriptor node : dataNodes) { map.add(node); @@ -46,7 +46,7 @@ public class TestHost2NodesMap { @Test public void testContains() throws Exception { DatanodeDescriptor nodeNotInMap = - new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5040), "/d1/r4"); + DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r4"); for (int i = 0; i < dataNodes.length; i++) { assertTrue(map.contains(dataNodes[i])); } @@ -66,7 +66,7 @@ public class TestHost2NodesMap { @Test public void testRemove() throws Exception { DatanodeDescriptor nodeNotInMap = - new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5040), "/d1/r4"); + DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r4"); assertFalse(map.remove(nodeNotInMap)); assertTrue(map.remove(dataNodes[0])); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java index 9ce24a7fae6..ce570f7eba2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java @@ -56,12 +56,12 @@ public class TestReplicationPolicy { public static void setupCluster() throws Exception { Configuration conf = new HdfsConfiguration(); dataNodes = new DatanodeDescriptor[] { - new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/d1/r1"), - new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/d1/r1"), - new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/d1/r2"), - new DatanodeDescriptor(new DatanodeID("4.4.4.4", 5020), "/d1/r2"), - new DatanodeDescriptor(new DatanodeID("5.5.5.5", 5020), "/d2/r3"), - new DatanodeDescriptor(new DatanodeID("6.6.6.6", 5020), "/d2/r3") + DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"), + DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"), + DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r2"), + DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/d1/r2"), + DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/d2/r3"), + DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/d2/r3") }; FileSystem.setDefaultUri(conf, "hdfs://localhost:0"); @@ -329,6 +329,7 @@ public class TestReplicationPolicy { HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0); } } + /** * In this testcase, client is is a node outside of file system. * So the 1st replica can be placed on any node. @@ -338,8 +339,8 @@ public class TestReplicationPolicy { */ @Test public void testChooseTarget5() throws Exception { - DatanodeDescriptor writerDesc = - new DatanodeDescriptor(new DatanodeID("7.7.7.7", 5020), "/d2/r4"); + DatanodeDescriptor writerDesc = + DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r4"); DatanodeDescriptor[] targets; targets = replicator.chooseTarget(filename, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java index c2ec8147913..78e05412c76 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java @@ -115,7 +115,7 @@ public class TestBPOfferService { 0, HdfsConstants.LAYOUT_VERSION)) .when(mock).versionRequest(); - Mockito.doReturn(new DatanodeRegistration("fake-node", 100)) + Mockito.doReturn(new DatanodeRegistration("1.2.3.4", 100)) .when(mock).registerDatanode(Mockito.any(DatanodeRegistration.class)); Mockito.doAnswer(new HeartbeatAnswer(nnIdx)) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java index d575d44efde..6c890b894d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java @@ -47,6 +47,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; 
import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.DatanodeID; @@ -197,9 +198,9 @@ public class TestBlockRecovery { locs, RECOVERY_ID); ArrayList syncList = new ArrayList(2); BlockRecord record1 = new BlockRecord( - new DatanodeID("xx", "yy", "zz", 1, 2, 3), dn1, replica1); + DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn1, replica1); BlockRecord record2 = new BlockRecord( - new DatanodeID("aa", "bb", "cc", 1, 2, 3), dn2, replica2); + DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn2, replica2); syncList.add(record1); syncList.add(record2); @@ -401,8 +402,7 @@ public class TestBlockRecovery { private Collection initRecoveringBlocks() throws IOException { Collection blocks = new ArrayList(1); - DatanodeInfo mockOtherDN = new DatanodeInfo( - new DatanodeID("127.0.0.1", "localhost", "storage-1234", 0, 0, 0)); + DatanodeInfo mockOtherDN = DFSTestUtil.getLocalDatanodeInfo(); DatanodeInfo[] locs = new DatanodeInfo[] { new DatanodeInfo(dn.getDNRegistrationForBP(block.getBlockPoolId())), mockOtherDN }; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java index b7195a34326..a5c85510743 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java @@ -356,8 +356,7 @@ public class TestInterDatanodeProtocol { server.start(); final InetSocketAddress addr = NetUtils.getConnectAddress(server); - DatanodeID fakeDnId = new DatanodeID( - "localhost", "localhost", "fake-storage", addr.getPort(), 0, addr.getPort()); + DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort()); DatanodeInfo dInfo = new DatanodeInfo(fakeDnId); InterDatanodeProtocol proxy = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java index 2ce6a8fd0e5..a18af908fc6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java @@ -21,6 +21,7 @@ package org.apache.hadoop.net; import java.util.HashMap; import java.util.Map; +import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; @@ -36,13 +37,13 @@ public class TestNetworkTopology { @Before public void setupDatanodes() { dataNodes = new DatanodeDescriptor[] { - new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/d1/r1"), - new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/d1/r1"), - new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/d1/r2"), - new DatanodeDescriptor(new DatanodeID("4.4.4.4", 5020), "/d1/r2"), - new DatanodeDescriptor(new DatanodeID("5.5.5.5", 5020), "/d1/r2"), - new DatanodeDescriptor(new DatanodeID("6.6.6.6", 5020), "/d2/r3"), - new DatanodeDescriptor(new DatanodeID("7.7.7.7", 5020), "/d2/r3") + 
DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"), + DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"), + DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r2"), + DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/d1/r2"), + DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/d1/r2"), + DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/d2/r3"), + DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r3") }; for (int i = 0; i < dataNodes.length; i++) { cluster.add(dataNodes[i]); @@ -52,7 +53,7 @@ public class TestNetworkTopology { @Test public void testContains() throws Exception { DatanodeDescriptor nodeNotInMap = - new DatanodeDescriptor(new DatanodeID("8.8.8.8", 5020), "/d2/r4"); + DFSTestUtil.getDatanodeDescriptor("8.8.8.8", "/d2/r4"); for (int i=0; i < dataNodes.length; i++) { assertTrue(cluster.contains(dataNodes[i])); } @@ -68,9 +69,9 @@ public class TestNetworkTopology { public void testCreateInvalidTopology() throws Exception { NetworkTopology invalCluster = new NetworkTopology(); DatanodeDescriptor invalDataNodes[] = new DatanodeDescriptor[] { - new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/d1/r1"), - new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/d1/r1"), - new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/d1") + DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"), + DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"), + DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1") }; invalCluster.add(invalDataNodes[0]); invalCluster.add(invalDataNodes[1]); From ba6360e493829312210666af93467d3d35d9d59d Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Fri, 11 May 2012 00:25:48 +0000 Subject: [PATCH 58/70] HDFS-3385. The last block of INodeFileUnderConstruction is not necessarily a BlockInfoUnderConstruction, so do not cast it in FSNamesystem.recoverLeaseInternal(..). git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1336976 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 4 ++ .../hdfs/server/namenode/FSNamesystem.java | 29 ++++++------- .../apache/hadoop/hdfs/TestFileAppend.java | 42 +++++++++++++++++++ 3 files changed, 59 insertions(+), 16 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index ef3932f8b30..a5f068f8bbc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -644,6 +644,10 @@ Release 2.0.0 - UNRELEASED HDFS-3395. NN doesn't start with HA+security enabled and HTTP address set to 0.0.0.0. (atm) + HDFS-3385. The last block of INodeFileUnderConstruction is not + necessarily a BlockInfoUnderConstruction, so do not cast it in + FSNamesystem.recoverLeaseInternal(..). (szetszwo) + BREAKDOWN OF HDFS-1623 SUBTASKS HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 55195e9e9b2..f1072f9bfe3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -1783,24 +1783,21 @@ public class FSNamesystem implements Namesystem, FSClusterStats, "Failed to close file " + src + ". Lease recovery is in progress. 
Try again later."); } else { - BlockInfoUnderConstruction lastBlock=pendingFile.getLastBlock(); - if(lastBlock != null && lastBlock.getBlockUCState() == - BlockUCState.UNDER_RECOVERY) { - throw new RecoveryInProgressException( - "Recovery in progress, file [" + src + "], " + - "lease owner [" + lease.getHolder() + "]"); - } else { - throw new AlreadyBeingCreatedException( - "Failed to create file [" + src + "] for [" + holder + - "] on client [" + clientMachine + - "], because this file is already being created by [" + - pendingFile.getClientName() + "] on [" + - pendingFile.getClientMachine() + "]"); - } - } + final BlockInfo lastBlock = pendingFile.getLastBlock(); + if (lastBlock != null + && lastBlock.getBlockUCState() == BlockUCState.UNDER_RECOVERY) { + throw new RecoveryInProgressException("Recovery in progress, file [" + + src + "], " + "lease owner [" + lease.getHolder() + "]"); + } else { + throw new AlreadyBeingCreatedException("Failed to create file [" + + src + "] for [" + holder + "] on client [" + clientMachine + + "], because this file is already being created by [" + + pendingFile.getClientName() + "] on [" + + pendingFile.getClientMachine() + "]"); + } + } } } - } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java index 20f28376a8e..54ff9036b91 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java @@ -31,12 +31,15 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.HardLink; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; +import org.apache.hadoop.ipc.RemoteException; +import org.junit.Assert; import org.junit.Test; /** @@ -295,4 +298,43 @@ public class TestFileAppend{ cluster.shutdown(); } } + + /** Test two consecutive appends on a file with a full block. */ + @Test + public void testAppendTwice() throws Exception { + Configuration conf = new HdfsConfiguration(); + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); + final FileSystem fs1 = cluster.getFileSystem(); + final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf); + try { + + final Path p = new Path("/testAppendTwice/foo"); + final int len = 1 << 16; + final byte[] fileContents = AppendTestUtil.initBuffer(len); + + { + // create a new file with a full block. + FSDataOutputStream out = fs2.create(p, true, 4096, (short)1, len); + out.write(fileContents, 0, len); + out.close(); + } + + //1st append does not add any data so that the last block remains full + //and the last block in INodeFileUnderConstruction is a BlockInfo + //but not BlockInfoUnderConstruction. 
+ fs2.append(p); + + //2nd append should get AlreadyBeingCreatedException + fs1.append(p); + Assert.fail(); + } catch(RemoteException re) { + AppendTestUtil.LOG.info("Got an exception:", re); + Assert.assertEquals(AlreadyBeingCreatedException.class.getName(), + re.getClassName()); + } finally { + fs2.close(); + fs1.close(); + cluster.shutdown(); + } + } } From 8def56e51e32f52a9bd30ca2196b27456b7bc0db Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Fri, 11 May 2012 03:15:54 +0000 Subject: [PATCH 59/70] HDFS-3400. DNs should be able start with jsvc even if security is disabled. Contributed by Aaron T. Myers git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1337017 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../hadoop-hdfs/src/main/bin/hdfs | 27 +++++++++++-------- .../datanode/SecureDataNodeStarter.java | 9 +++---- 3 files changed, 23 insertions(+), 16 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index a5f068f8bbc..9dafb118d28 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -451,6 +451,9 @@ Release 2.0.0 - UNRELEASED HDFS-3401. Cleanup DatanodeDescriptor creation in the tests. (eli) + HDFS-3400. DNs should be able start with jsvc even if security is disabled. + (atm via eli) + OPTIMIZATIONS HDFS-3024. Improve performance of stringification in addStoredBlock (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs index 40b370407b3..986849e918d 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs @@ -56,16 +56,21 @@ shift # Determine if we're starting a secure datanode, and if so, redefine appropriate variables if [ "$COMMAND" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then - if [ -n "$HADOOP_SECURE_DN_PID_DIR" ]; then - HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR + if [ -n "$JSVC_HOME" ]; then + if [ -n "$HADOOP_SECURE_DN_PID_DIR" ]; then + HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR + fi + + if [ -n "$HADOOP_SECURE_DN_LOG_DIR" ]; then + HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR + fi + + HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER + starting_secure_dn="true" + else + echo "It looks like you're trying to start a secure DN, but \$JSVC_HOME"\ + "isn't set. Falling back to starting insecure DN." fi - - if [ -n "$HADOOP_SECURE_DN_LOG_DIR" ]; then - HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR - fi - - HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER - starting_secure_dn="true" fi if [ "$COMMAND" = "namenode" ] ; then @@ -125,12 +130,12 @@ if [ "$starting_secure_dn" = "true" ]; then if [ "$HADOOP_PID_DIR" = "" ]; then HADOOP_SECURE_DN_PID="/tmp/hadoop_secure_dn.pid" else - HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid" + HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid" fi JSVC=$JSVC_HOME/jsvc if [ ! -f $JSVC ]; then - echo "JSVC_HOME is not set correctly so jsvc can not be found. Jsvc is required to run secure datanodes. " + echo "JSVC_HOME is not set correctly so jsvc cannot be found. Jsvc is required to run secure datanodes. " echo "Please download and install jsvc from http://archive.apache.org/dist/commons/daemon/binaries/ "\ "and set JSVC_HOME to the directory containing the jsvc binary." 
exit diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java index f7da29b4c9d..2d1ff6437b9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java @@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.http.HttpServer; +import org.apache.hadoop.security.UserGroupInformation; import org.mortbay.jetty.nio.SelectChannelConnector; /** @@ -60,10 +61,7 @@ public class SecureDataNodeStarter implements Daemon { @Override public void init(DaemonContext context) throws Exception { System.err.println("Initializing secure datanode resources"); - // We should only start up a secure datanode in a Kerberos-secured cluster - Configuration conf = new Configuration(); // Skip UGI method to not log in - if(!conf.get(HADOOP_SECURITY_AUTHENTICATION).equals("kerberos")) - throw new RuntimeException("Cannot start secure datanode in unsecure cluster"); + Configuration conf = new Configuration(); // Stash command-line arguments for regular datanode args = context.getArguments(); @@ -98,7 +96,8 @@ public class SecureDataNodeStarter implements Daemon { System.err.println("Successfully obtained privileged resources (streaming port = " + ss + " ) (http listener port = " + listener.getConnection() +")"); - if (ss.getLocalPort() >= 1023 || listener.getPort() >= 1023) { + if ((ss.getLocalPort() >= 1023 || listener.getPort() >= 1023) && + UserGroupInformation.isSecurityEnabled()) { throw new RuntimeException("Cannot start secure datanode with unprivileged ports"); } System.err.println("Opened streaming server at " + streamingAddr); From 5dbd09ee768d2fcc9e925ceec72c3d6258defd04 Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Fri, 11 May 2012 05:57:37 +0000 Subject: [PATCH 60/70] HDFS-3026. HA: Handle failure during HA state transition. Contributed by Aaron T. Myers. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1337030 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../hadoop/hdfs/server/namenode/NameNode.java | 85 +++++++++++++++---- .../ha/TestStateTransitionFailure.java | 80 +++++++++++++++++ 3 files changed, 149 insertions(+), 18 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStateTransitionFailure.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 9dafb118d28..3dc9b698ebc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -651,6 +651,8 @@ Release 2.0.0 - UNRELEASED necessarily a BlockInfoUnderConstruction, so do not cast it in FSNamesystem.recoverLeaseInternal(..). (szetszwo) + HDFS-3026. HA: Handle failure during HA state transition. (atm) + BREAKDOWN OF HDFS-1623 SUBTASKS HDFS-2179. Add fencing framework and mechanisms for NameNode HA. 
(todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 56ba8a2082b..6f87fa7a5cc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -206,6 +206,7 @@ public class NameNode { private final boolean haEnabled; private final HAContext haContext; protected boolean allowStaleStandbyReads; + private Runtime runtime = Runtime.getRuntime(); /** httpServer */ @@ -481,11 +482,16 @@ public class NameNode { } private void startTrashEmptier(Configuration conf) throws IOException { - long trashInterval - = conf.getLong(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, - CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT); - if(trashInterval == 0) + long trashInterval = conf.getLong( + CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, + CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT); + if (trashInterval == 0) { return; + } else if (trashInterval < 0) { + throw new IOException("Cannot start tresh emptier with negative interval." + + " Set " + CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY + " to a" + + " positive value."); + } this.emptier = new Thread(new Trash(conf).getEmptier(), "Trash Emptier"); this.emptier.setDaemon(true); this.emptier.start(); @@ -1235,14 +1241,37 @@ public class NameNode { } return state.getServiceState(); } + + @VisibleForTesting + public synchronized void setRuntimeForTesting(Runtime runtime) { + this.runtime = runtime; + } /** - * Class used as expose {@link NameNode} as context to {@link HAState} + * Shutdown the NN immediately in an ungraceful way. Used when it would be + * unsafe for the NN to continue operating, e.g. during a failed HA state + * transition. * - * TODO(HA): - * When entering and exiting state, on failing to start services, - * appropriate action is needed todo either shutdown the node or recover - * from failure. + * @param t exception which warrants the shutdown. Printed to the NN log + * before exit. + * @throws ServiceFailedException thrown only for testing. + */ + private synchronized void doImmediateShutdown(Throwable t) + throws ServiceFailedException { + String message = "Error encountered requiring NN shutdown. " + + "Shutting down immediately."; + try { + LOG.fatal(message, t); + } catch (Throwable ignored) { + // This is unlikely to happen, but there's nothing we can do if it does. + } + runtime.exit(1); + // This code is only reached during testing, when runtime is stubbed out. 
+ throw new ServiceFailedException(message, t); + } + + /** + * Class used to expose {@link NameNode} as context to {@link HAState} */ protected class NameNodeHAContext implements HAContext { @Override @@ -1257,32 +1286,52 @@ public class NameNode { @Override public void startActiveServices() throws IOException { - namesystem.startActiveServices(); - startTrashEmptier(conf); + try { + namesystem.startActiveServices(); + startTrashEmptier(conf); + } catch (Throwable t) { + doImmediateShutdown(t); + } } @Override public void stopActiveServices() throws IOException { - if (namesystem != null) { - namesystem.stopActiveServices(); + try { + if (namesystem != null) { + namesystem.stopActiveServices(); + } + stopTrashEmptier(); + } catch (Throwable t) { + doImmediateShutdown(t); } - stopTrashEmptier(); } @Override public void startStandbyServices() throws IOException { - namesystem.startStandbyServices(conf); + try { + namesystem.startStandbyServices(conf); + } catch (Throwable t) { + doImmediateShutdown(t); + } } @Override public void prepareToStopStandbyServices() throws ServiceFailedException { - namesystem.prepareToStopStandbyServices(); + try { + namesystem.prepareToStopStandbyServices(); + } catch (Throwable t) { + doImmediateShutdown(t); + } } @Override public void stopStandbyServices() throws IOException { - if (namesystem != null) { - namesystem.stopStandbyServices(); + try { + if (namesystem != null) { + namesystem.stopStandbyServices(); + } + } catch (Throwable t) { + doImmediateShutdown(t); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStateTransitionFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStateTransitionFailure.java new file mode 100644 index 00000000000..bf1ca52b79d --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStateTransitionFailure.java @@ -0,0 +1,80 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.namenode.ha; + +import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains; +import static org.junit.Assert.fail; +import static org.mockito.Matchers.anyInt; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.ha.ServiceFailedException; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.MiniDFSNNTopology; +import org.junit.Test; + +/** + * Tests to verify the behavior of failing to fully start transition HA states. + */ +public class TestStateTransitionFailure { + + public static final Log LOG = LogFactory.getLog(TestStateTransitionFailure.class); + + /** + * Ensure that a failure to fully transition to the active state causes a + * shutdown of the NameNode. + */ + @Test + public void testFailureToTransitionCausesShutdown() throws IOException { + MiniDFSCluster cluster = null; + try { + Configuration conf = new Configuration(); + // Set an illegal value for the trash emptier interval. This will cause + // the NN to fail to transition to the active state. + conf.setLong(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, -1); + cluster = new MiniDFSCluster.Builder(conf) + .nnTopology(MiniDFSNNTopology.simpleHATopology()) + .numDataNodes(0) + .build(); + cluster.waitActive(); + Runtime mockRuntime = mock(Runtime.class); + cluster.getNameNode(0).setRuntimeForTesting(mockRuntime); + verify(mockRuntime, times(0)).exit(anyInt()); + try { + cluster.transitionToActive(0); + fail("Transitioned to active but should not have been able to."); + } catch (ServiceFailedException sfe) { + assertExceptionContains("Error encountered requiring NN shutdown. " + + "Shutting down immediately.", sfe); + LOG.info("got expected exception", sfe); + } + verify(mockRuntime, times(1)).exit(anyInt()); + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } +} From aea890f7d215d97feec873228158daefa2e63217 Mon Sep 17 00:00:00 2001 From: Thomas White Date: Fri, 11 May 2012 15:00:48 +0000 Subject: [PATCH 61/70] MAPREDUCE-4148. MapReduce should not have a compile-time dependency on HDFS. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1337199 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/hadoop/security/token/Token.java | 70 +++++++++++++++++-- .../hadoop/security/token/TestToken.java | 19 +++++ ...ache.hadoop.security.token.TokenIdentifier | 2 + ...ache.hadoop.security.token.TokenIdentifier | 2 + hadoop-mapreduce-project/CHANGES.txt | 3 + ...ache.hadoop.security.token.TokenIdentifier | 1 + .../hadoop-mapreduce-client-core/pom.xml | 1 + .../apache/hadoop/mapreduce/JobSubmitter.java | 4 +- .../hadoop/mapreduce/security/TokenCache.java | 7 +- .../token/DelegationTokenRenewal.java | 1 - ...ache.hadoop.security.token.TokenIdentifier | 2 + .../hadoop-mapreduce-client/pom.xml | 2 +- .../hadoop-mapreduce-examples/pom.xml | 2 +- ...ache.hadoop.security.token.TokenIdentifier | 4 ++ ...ache.hadoop.security.token.TokenIdentifier | 1 + hadoop-mapreduce-project/pom.xml | 2 +- 16 files changed, 106 insertions(+), 17 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier create mode 100644 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier create mode 100644 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier create mode 100644 hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier create mode 100644 hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java index e95ade860b0..bbddf6fdc78 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java @@ -18,10 +18,15 @@ package org.apache.hadoop.security.token; +import com.google.common.collect.Maps; + +import java.io.ByteArrayInputStream; import java.io.DataInput; +import java.io.DataInputStream; import java.io.DataOutput; import java.io.IOException; import java.util.Arrays; +import java.util.Map; import java.util.ServiceLoader; import org.apache.commons.codec.binary.Base64; @@ -37,6 +42,7 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableComparator; import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.util.ReflectionUtils; /** * The client-side form of the token. 
@@ -45,6 +51,9 @@ import org.apache.hadoop.io.WritableUtils; @InterfaceStability.Evolving public class Token implements Writable { public static final Log LOG = LogFactory.getLog(Token.class); + + private static Map> tokenKindMap; + private byte[] identifier; private byte[] password; private Text kind; @@ -100,13 +109,49 @@ public class Token implements Writable { } /** - * Get the token identifier - * @return the token identifier + * Get the token identifier's byte representation + * @return the token identifier's byte representation */ public byte[] getIdentifier() { return identifier; } + private static synchronized Class + getClassForIdentifier(Text kind) { + if (tokenKindMap == null) { + tokenKindMap = Maps.newHashMap(); + for (TokenIdentifier id : ServiceLoader.load(TokenIdentifier.class)) { + tokenKindMap.put(id.getKind(), id.getClass()); + } + } + Class cls = tokenKindMap.get(kind); + if (cls == null) { + LOG.warn("Cannot find class for token kind " + kind); + return null; + } + return cls; + } + + /** + * Get the token identifier object, or null if it could not be constructed + * (because the class could not be loaded, for example). + * @return the token identifier, or null + * @throws IOException + */ + @SuppressWarnings("unchecked") + public T decodeIdentifier() throws IOException { + Class cls = getClassForIdentifier(getKind()); + if (cls == null) { + return null; + } + TokenIdentifier tokenIdentifier = ReflectionUtils.newInstance(cls, null); + ByteArrayInputStream buf = new ByteArrayInputStream(identifier); + DataInputStream in = new DataInputStream(buf); + tokenIdentifier.readFields(in); + in.close(); + return (T) tokenIdentifier; + } + /** * Get the token password/secret * @return the token password/secret @@ -260,16 +305,31 @@ public class Token implements Writable { buffer.append(num); } } + + private void identifierToString(StringBuilder buffer) { + T id = null; + try { + id = decodeIdentifier(); + } catch (IOException e) { + // handle in the finally block + } finally { + if (id != null) { + buffer.append("(").append(id).append(")"); + } else { + addBinaryBuffer(buffer, identifier); + } + } + } @Override public String toString() { StringBuilder buffer = new StringBuilder(); - buffer.append("Ident: "); - addBinaryBuffer(buffer, identifier); - buffer.append(", Kind: "); + buffer.append("Kind: "); buffer.append(kind.toString()); buffer.append(", Service: "); buffer.append(service.toString()); + buffer.append(", Ident: "); + identifierToString(buffer); return buffer.toString(); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java index 54b75da23bf..6d7d695663b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java @@ -18,11 +18,15 @@ package org.apache.hadoop.security.token; +import static junit.framework.Assert.assertEquals; + import java.io.*; import java.util.Arrays; import org.apache.hadoop.io.*; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; +import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenIdentifier; +import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenSecretManager; import junit.framework.TestCase; @@ -94,5 +98,20 @@ public class 
TestToken extends TestCase { checkUrlSafe(encode); } } + + public void testDecodeIdentifier() throws IOException { + TestDelegationTokenSecretManager secretManager = + new TestDelegationTokenSecretManager(0, 0, 0, 0); + secretManager.startThreads(); + TestDelegationTokenIdentifier id = new TestDelegationTokenIdentifier( + new Text("owner"), new Text("renewer"), new Text("realUser")); + + Token token = + new Token(id, secretManager); + TokenIdentifier idCopy = token.decodeIdentifier(); + + assertNotSame(id, idCopy); + assertEquals(id, idCopy); + } } diff --git a/hadoop-common-project/hadoop-common/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-common-project/hadoop-common/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier new file mode 100644 index 00000000000..891a67b61f4 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier @@ -0,0 +1,2 @@ +org.apache.hadoop.ipc.TestSaslRPC$TestTokenIdentifier +org.apache.hadoop.security.token.delegation.TestDelegationToken$TestDelegationTokenIdentifier diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier new file mode 100644 index 00000000000..10b874b6855 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier @@ -0,0 +1,2 @@ +org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier +org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 7b8e045a2d4..0be7073a9a3 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -287,6 +287,9 @@ Release 2.0.0 - UNRELEASED MAPREDUCE-4231. Update RAID to use the new BlockCollection interface. (szetszwo) + MAPREDUCE-4148. MapReduce should not have a compile-time dependency on + HDFS. 
(tomwhite) + Release 0.23.3 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier new file mode 100644 index 00000000000..0975deab7e7 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier @@ -0,0 +1 @@ +org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml index cfb8ce4bd7e..e60d745faa5 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml @@ -37,6 +37,7 @@ org.apache.hadoop hadoop-hdfs + test diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java index 4038f65cd46..148df503243 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java @@ -38,7 +38,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.QueueACL; @@ -433,8 +432,7 @@ class JobSubmitter { LOG.debug("Printing tokens for job: " + jobId); for(Token token: credentials.getAllTokens()) { if (token.getKind().toString().equals("HDFS_DELEGATION_TOKEN")) { - LOG.debug("Submitting with " + - org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier.stringifyToken(token)); + LOG.debug("Submitting with " + token); } } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java index 9e8c1909a13..1109f3f3825 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java @@ -30,7 +30,6 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapred.JobConf; import 
org.apache.hadoop.mapred.Master; @@ -179,16 +178,14 @@ public class TokenCache { * @param namenode * @return delegation token */ - @SuppressWarnings("unchecked") @InterfaceAudience.Private - public static Token getDelegationToken( + public static Token getDelegationToken( Credentials credentials, String namenode) { //No fs specific tokens issues by this fs. It may however issue tokens // for other filesystems - which would be keyed by that filesystems name. if (namenode == null) return null; - return (Token) credentials.getToken(new Text( - namenode)); + return (Token) credentials.getToken(new Text(namenode)); } /** diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/DelegationTokenRenewal.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/DelegationTokenRenewal.java index e4675b523a5..90007770691 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/DelegationTokenRenewal.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/DelegationTokenRenewal.java @@ -39,7 +39,6 @@ import org.apache.hadoop.mapreduce.JobID; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.util.StringUtils; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier new file mode 100644 index 00000000000..f797a6aa6ff --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier @@ -0,0 +1,2 @@ +org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier +org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml index 5a96b315395..3cff07d1333 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml @@ -114,8 +114,8 @@ org.apache.hadoop - hadoop-hdfs + test com.google.inject.extensions diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml index 3520839173a..f02617f4678 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml @@ -57,7 +57,7 @@ org.apache.hadoop hadoop-hdfs - provided + runtime org.apache.hadoop diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier new file mode 100644 index 00000000000..fc669de1572 --- /dev/null +++ 
b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier @@ -0,0 +1,4 @@ +org.apache.hadoop.yarn.security.ContainerTokenIdentifier +org.apache.hadoop.yarn.security.ApplicationTokenIdentifier +org.apache.hadoop.yarn.security.client.ClientTokenIdentifier +org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier new file mode 100644 index 00000000000..6ed6e3261e2 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier @@ -0,0 +1 @@ +org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security.LocalizerTokenIdentifier diff --git a/hadoop-mapreduce-project/pom.xml b/hadoop-mapreduce-project/pom.xml index cca84273558..3b340fddbb1 100644 --- a/hadoop-mapreduce-project/pom.xml +++ b/hadoop-mapreduce-project/pom.xml @@ -128,8 +128,8 @@ org.apache.hadoop - hadoop-hdfs + test com.google.inject From cc9c6bdce213eae1a4f132acb125cb8caed223ce Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Fri, 11 May 2012 16:15:18 +0000 Subject: [PATCH 62/70] HADOOP-8353. hadoop-daemon.sh and yarn-daemon.sh can be misleading on stop. Contributed by Roman Shaposhnik. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1337251 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../hadoop-common/src/main/bin/hadoop-daemon.sh | 11 +++++++++-- hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh | 11 +++++++++-- .../hadoop-yarn/bin/yarn-daemon.sh | 11 +++++++++-- 4 files changed, 30 insertions(+), 6 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index d293fe4940b..6400769b298 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -311,6 +311,9 @@ Release 2.0.0 - UNRELEASED HADOOP-8361. Avoid out-of-memory problems when deserializing strings. (Colin Patrick McCabe via eli) + HADOOP-8353. hadoop-daemon.sh and yarn-daemon.sh can be misleading on stop. 
+ (Roman Shaposhnik via atm) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh index 1a4d6446fb5..16e711429ce 100755 --- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh +++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh @@ -111,6 +111,7 @@ export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-"INFO,RFA"} export HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-"INFO,RFAS"} log=$HADOOP_LOG_DIR/hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.out pid=$HADOOP_PID_DIR/hadoop-$HADOOP_IDENT_STRING-$command.pid +HADOOP_STOP_TIMEOUT=${HADOOP_STOP_TIMEOUT:-5} # Set default scheduling priority if [ "$HADOOP_NICENESS" = "" ]; then @@ -162,9 +163,15 @@ case $startStop in (stop) if [ -f $pid ]; then - if kill -0 `cat $pid` > /dev/null 2>&1; then + TARGET_PID=`cat $pid` + if kill -0 $TARGET_PID > /dev/null 2>&1; then echo stopping $command - kill `cat $pid` + kill $TARGET_PID + sleep $HADOOP_STOP_TIMEOUT + if kill -0 $TARGET_PID > /dev/null 2>&1; then + echo "$command did not stop gracefully after $HADOOP_STOP_TIMEOUT seconds: killing with kill -9" + kill -9 $TARGET_PID + fi else echo no $command to stop fi diff --git a/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh b/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh index 4cd6eb1ec8d..ed2eef0eb9d 100644 --- a/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh +++ b/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh @@ -94,6 +94,7 @@ export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-INFO,RFA} export HADOOP_JHS_LOGGER=${HADOOP_JHS_LOGGER:-INFO,JSA} log=$YARN_LOG_DIR/yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.out pid=$YARN_PID_DIR/yarn-$YARN_IDENT_STRING-$command.pid +YARN_STOP_TIMEOUT=${YARN_STOP_TIMEOUT:-5} # Set default scheduling priority if [ "$YARN_NICENESS" = "" ]; then @@ -129,9 +130,15 @@ case $startStop in (stop) if [ -f $pid ]; then - if kill -0 `cat $pid` > /dev/null 2>&1; then + TARGET_PID=`cat $pid` + if kill -0 $TARGET_PID > /dev/null 2>&1; then echo stopping $command - kill `cat $pid` + kill $TARGET_PID + sleep $YARN_STOP_TIMEOUT + if kill -0 $TARGET_PID > /dev/null 2>&1; then + echo "$command did not stop gracefully after $YARN_STOP_TIMEOUT seconds: killing with kill -9" + kill -9 $TARGET_PID + fi else echo no $command to stop fi diff --git a/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-daemon.sh b/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-daemon.sh index 89ae9d87bea..07326a166bc 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-daemon.sh +++ b/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-daemon.sh @@ -93,6 +93,7 @@ export YARN_LOGFILE=yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.log export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,RFA} log=$YARN_LOG_DIR/yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.out pid=$YARN_PID_DIR/yarn-$YARN_IDENT_STRING-$command.pid +YARN_STOP_TIMEOUT=${YARN_STOP_TIMEOUT:-5} # Set default scheduling priority if [ "$YARN_NICENESS" = "" ]; then @@ -128,9 +129,15 @@ case $startStop in (stop) if [ -f $pid ]; then - if kill -0 `cat $pid` > /dev/null 2>&1; then + TARGET_PID=`cat $pid` + if kill -0 $TARGET_PID > /dev/null 2>&1; then echo stopping $command - kill `cat $pid` + kill $TARGET_PID + sleep $YARN_STOP_TIMEOUT + if kill -0 $TARGET_PID > /dev/null 2>&1; then + echo "$command did not stop gracefully after $YARN_STOP_TIMEOUT seconds: killing with kill -9" + kill -9 $TARGET_PID + fi else echo no $command to stop fi From 
2116f28d9e95896b54f4dc60336dc3f6ac7d64f3 Mon Sep 17 00:00:00 2001 From: Sanjay Radia Date: Fri, 11 May 2012 16:56:52 +0000 Subject: [PATCH 63/70] HADOOP-8366 Use ProtoBuf for RpcResponseHeader (sanjay radia) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1337283 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 2 + .../java/org/apache/hadoop/ipc/Client.java | 24 +++--- .../java/org/apache/hadoop/ipc/Server.java | 75 ++++++++++++++----- .../java/org/apache/hadoop/ipc/Status.java | 32 -------- .../src/main/proto/RpcPayloadHeader.proto | 27 ++++++- 5 files changed, 95 insertions(+), 65 deletions(-) delete mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Status.java diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 6400769b298..a4cdfedd7d3 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -67,6 +67,8 @@ Trunk (unreleased changes) HADOOP-8308. Support cross-project Jenkins builds. (tomwhite) + HADOOP-8366 Use ProtoBuf for RpcResponseHeader (sanjay radia) + BUG FIXES HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java index 083141311b5..ef32cfde3a9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java @@ -53,6 +53,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto; import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcPayloadHeaderProto; import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcPayloadOperationProto; +import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcResponseHeaderProto; +import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcStatusProto; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableUtils; @@ -845,24 +847,24 @@ public class Client { touch(); try { - int id = in.readInt(); // try to read an id - + RpcResponseHeaderProto response = + RpcResponseHeaderProto.parseDelimitedFrom(in); + int callId = response.getCallId(); if (LOG.isDebugEnabled()) - LOG.debug(getName() + " got value #" + id); + LOG.debug(getName() + " got value #" + callId); - Call call = calls.get(id); - - int state = in.readInt(); // read call status - if (state == Status.SUCCESS.state) { + Call call = calls.get(callId); + RpcStatusProto status = response.getStatus(); + if (status == RpcStatusProto.SUCCESS) { Writable value = ReflectionUtils.newInstance(valueClass, conf); value.readFields(in); // read value call.setRpcResponse(value); - calls.remove(id); - } else if (state == Status.ERROR.state) { + calls.remove(callId); + } else if (status == RpcStatusProto.ERROR) { call.setException(new RemoteException(WritableUtils.readString(in), WritableUtils.readString(in))); - calls.remove(id); - } else if (state == Status.FATAL.state) { + calls.remove(callId); + } else if (status == RpcStatusProto.FATAL) { // Close the connection markClosed(new RemoteException(WritableUtils.readString(in), WritableUtils.readString(in))); diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index 6369f3aa7d8..3173ad07ba0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -1339,7 +1339,7 @@ public abstract class Server { + CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION + ") is configured as simple. Please configure another method " + "like kerberos or digest."); - setupResponse(authFailedResponse, authFailedCall, Status.FATAL, + setupResponse(authFailedResponse, authFailedCall, RpcStatusProto.FATAL, null, ae.getClass().getName(), ae.getMessage()); responder.doRespond(authFailedCall); throw ae; @@ -1420,7 +1420,7 @@ public abstract class Server { Call fakeCall = new Call(-1, null, this); // Versions 3 and greater can interpret this exception // response in the same manner - setupResponse(buffer, fakeCall, Status.FATAL, + setupResponseOldVersionFatal(buffer, fakeCall, null, VersionMismatch.class.getName(), errMsg); responder.doRespond(fakeCall); @@ -1443,7 +1443,7 @@ public abstract class Server { ByteArrayOutputStream buffer = new ByteArrayOutputStream(); Call fakeCall = new Call(-1, null, this); - setupResponse(buffer, fakeCall, Status.FATAL, null, + setupResponse(buffer, fakeCall, RpcStatusProto.FATAL, null, IpcException.class.getName(), errMsg); responder.doRespond(fakeCall); } @@ -1579,7 +1579,7 @@ public abstract class Server { new Call(header.getCallId(), null, this); ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream(); - setupResponse(responseBuffer, readParamsFailedCall, Status.FATAL, null, + setupResponse(responseBuffer, readParamsFailedCall, RpcStatusProto.FATAL, null, IOException.class.getName(), "Unknown rpc kind " + header.getRpcKind()); responder.doRespond(readParamsFailedCall); @@ -1597,7 +1597,7 @@ public abstract class Server { new Call(header.getCallId(), null, this); ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream(); - setupResponse(responseBuffer, readParamsFailedCall, Status.FATAL, null, + setupResponse(responseBuffer, readParamsFailedCall, RpcStatusProto.FATAL, null, t.getClass().getName(), "IPC server unable to read call parameters: " + t.getMessage()); responder.doRespond(readParamsFailedCall); @@ -1627,7 +1627,7 @@ public abstract class Server { rpcMetrics.incrAuthorizationSuccesses(); } catch (AuthorizationException ae) { rpcMetrics.incrAuthorizationFailures(); - setupResponse(authFailedResponse, authFailedCall, Status.FATAL, null, + setupResponse(authFailedResponse, authFailedCall, RpcStatusProto.FATAL, null, ae.getClass().getName(), ae.getMessage()); responder.doRespond(authFailedCall); return false; @@ -1725,8 +1725,8 @@ public abstract class Server { // responder.doResponse() since setupResponse may use // SASL to encrypt response data and SASL enforces // its own message ordering. - setupResponse(buf, call, (error == null) ? Status.SUCCESS - : Status.ERROR, value, errorClass, error); + setupResponse(buf, call, (error == null) ? RpcStatusProto.SUCCESS + : RpcStatusProto.ERROR, value, errorClass, error); // Discard the large buf and reset it back to smaller size // to free up heap @@ -1859,40 +1859,79 @@ public abstract class Server { /** * Setup response for the IPC Call. 
* - * @param response buffer to serialize the response into + * @param responseBuf buffer to serialize the response into * @param call {@link Call} to which we are setting up the response - * @param status {@link Status} of the IPC call + * @param status of the IPC call * @param rv return value for the IPC Call, if the call was successful * @param errorClass error class, if the the call failed * @param error error message, if the call failed * @throws IOException */ - private void setupResponse(ByteArrayOutputStream response, - Call call, Status status, + private void setupResponse(ByteArrayOutputStream responseBuf, + Call call, RpcStatusProto status, Writable rv, String errorClass, String error) throws IOException { - response.reset(); - DataOutputStream out = new DataOutputStream(response); - out.writeInt(call.callId); // write call id - out.writeInt(status.state); // write status + responseBuf.reset(); + DataOutputStream out = new DataOutputStream(responseBuf); + RpcResponseHeaderProto.Builder response = + RpcResponseHeaderProto.newBuilder(); + response.setCallId(call.callId); + response.setStatus(status); - if (status == Status.SUCCESS) { + + if (status == RpcStatusProto.SUCCESS) { try { + response.build().writeDelimitedTo(out); rv.write(out); } catch (Throwable t) { LOG.warn("Error serializing call response for call " + call, t); // Call back to same function - this is OK since the // buffer is reset at the top, and since status is changed // to ERROR it won't infinite loop. - setupResponse(response, call, Status.ERROR, + setupResponse(responseBuf, call, RpcStatusProto.ERROR, null, t.getClass().getName(), StringUtils.stringifyException(t)); return; } } else { + if (status == RpcStatusProto.FATAL) { + response.setServerIpcVersionNum(Server.CURRENT_VERSION); + } + response.build().writeDelimitedTo(out); WritableUtils.writeString(out, errorClass); WritableUtils.writeString(out, error); } + if (call.connection.useWrap) { + wrapWithSasl(responseBuf, call); + } + call.setResponse(ByteBuffer.wrap(responseBuf.toByteArray())); + } + + /** + * Setup response for the IPC Call on Fatal Error from a + * client that is using old version of Hadoop. + * The response is serialized using the previous protocol's response + * layout. 
+ * + * @param response buffer to serialize the response into + * @param call {@link Call} to which we are setting up the response + * @param rv return value for the IPC Call, if the call was successful + * @param errorClass error class, if the the call failed + * @param error error message, if the call failed + * @throws IOException + */ + private void setupResponseOldVersionFatal(ByteArrayOutputStream response, + Call call, + Writable rv, String errorClass, String error) + throws IOException { + final int OLD_VERSION_FATAL_STATUS = -1; + response.reset(); + DataOutputStream out = new DataOutputStream(response); + out.writeInt(call.callId); // write call id + out.writeInt(OLD_VERSION_FATAL_STATUS); // write FATAL_STATUS + WritableUtils.writeString(out, errorClass); + WritableUtils.writeString(out, error); + if (call.connection.useWrap) { wrapWithSasl(response, call); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Status.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Status.java deleted file mode 100644 index 16fd871ffa6..00000000000 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Status.java +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ipc; - -/** - * Status of a Hadoop IPC call. - */ -enum Status { - SUCCESS (0), - ERROR (1), - FATAL (-1); - - int state; - private Status(int state) { - this.state = state; - } -} diff --git a/hadoop-common-project/hadoop-common/src/main/proto/RpcPayloadHeader.proto b/hadoop-common-project/hadoop-common/src/main/proto/RpcPayloadHeader.proto index 42dea3bde3e..50657413012 100644 --- a/hadoop-common-project/hadoop-common/src/main/proto/RpcPayloadHeader.proto +++ b/hadoop-common-project/hadoop-common/src/main/proto/RpcPayloadHeader.proto @@ -19,7 +19,6 @@ option java_package = "org.apache.hadoop.ipc.protobuf"; option java_outer_classname = "RpcPayloadHeaderProtos"; option java_generate_equals_and_hash = true; - /** * This is the rpc payload header. It is sent with every rpc call. 
* @@ -34,8 +33,6 @@ option java_generate_equals_and_hash = true; * */ - - /** * RpcKind determine the rpcEngine and the serialization of the rpc payload */ @@ -54,5 +51,27 @@ enum RpcPayloadOperationProto { message RpcPayloadHeaderProto { // the header for the RpcRequest optional RpcKindProto rpcKind = 1; optional RpcPayloadOperationProto rpcOp = 2; - optional uint32 callId = 3; // each rpc has a callId that is also used in response + required uint32 callId = 3; // each rpc has a callId that is also used in response +} + +enum RpcStatusProto { + SUCCESS = 0; // RPC succeeded + ERROR = 1; // RPC Failed + FATAL = 2; // Fatal error - connection is closed +} + +/** + * Rpc Response Header + * - If successfull then the Respose follows after this header + * - length (4 byte int), followed by the response + * - If error or fatal - the exception info follow + * - length (4 byte int) Class name of exception - UTF-8 string + * - length (4 byte int) Stacktrace - UTF-8 string + * - if the strings are null then the length is -1 + * In case of Fatal error then the respose contains the Serverside's IPC version + */ +message RpcResponseHeaderProto { + required uint32 callId = 1; // callId used in Request + required RpcStatusProto status = 2; + optional uint32 serverIpcVersionNum = 3; // in case of an fatal IPC error } From e2af2f1b87c300dae0b3d816f5a64b0dcd006c35 Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Fri, 11 May 2012 19:21:57 +0000 Subject: [PATCH 64/70] HADOOP-8316. Audit logging should be disabled by default. Contributed by Eli Collins git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1337334 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++ .../hadoop-common/src/main/conf/log4j.properties | 6 +++--- .../src/main/packages/templates/conf/hadoop-env.sh | 4 ++-- .../src/main/packages/templates/conf/log4j.properties | 6 +++--- 4 files changed, 10 insertions(+), 8 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index a4cdfedd7d3..b00de53dbaf 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -513,6 +513,8 @@ Release 2.0.0 - UNRELEASED HADOOP-7868. Hadoop native fails to compile when default linker option is -Wl,--as-needed. (Trevor Robinson via eli) + HADOOP-8316. Audit logging should be disabled by default. 
(eli) + Release 0.23.3 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties index 3470b3ef1b7..63e27cf72f8 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties +++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties @@ -102,7 +102,7 @@ log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n # #Security appender # -hadoop.security.logger=INFO,console +hadoop.security.logger=INFO,NullAppender hadoop.security.log.maxfilesize=256MB hadoop.security.log.maxbackupindex=20 log4j.category.SecurityLogger=${hadoop.security.logger} @@ -126,7 +126,7 @@ log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd # # hdfs audit logging # -hdfs.audit.logger=INFO,console +hdfs.audit.logger=INFO,NullAppender hdfs.audit.log.maxfilesize=256MB hdfs.audit.log.maxbackupindex=20 log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger} @@ -141,7 +141,7 @@ log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex} # # mapred audit logging # -mapred.audit.logger=INFO,console +mapred.audit.logger=INFO,NullAppender mapred.audit.log.maxfilesize=256MB mapred.audit.log.maxbackupindex=20 log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger} diff --git a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh index e7f16fee7e0..a9771832dd0 100644 --- a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh +++ b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh @@ -48,10 +48,10 @@ done export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true $HADOOP_CLIENT_OPTS" # Command specific options appended to HADOOP_OPTS when specified -export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=INFO,RFAAUDIT $HADOOP_NAMENODE_OPTS" +export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=INFO,NullAppender $HADOOP_NAMENODE_OPTS" export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS" -export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=INFO,RFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS" +export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=INFO,NullAppender $HADOOP_SECONDARYNAMENODE_OPTS" # The following applies to multiple commands (fs, dfs, fsck, distcp etc) export HADOOP_CLIENT_OPTS="-Xmx128m $HADOOP_CLIENT_OPTS" diff --git a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties index 3470b3ef1b7..63e27cf72f8 100644 --- a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties +++ b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties @@ -102,7 +102,7 @@ log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n # #Security appender # -hadoop.security.logger=INFO,console +hadoop.security.logger=INFO,NullAppender hadoop.security.log.maxfilesize=256MB hadoop.security.log.maxbackupindex=20 log4j.category.SecurityLogger=${hadoop.security.logger} @@ -126,7 +126,7 @@ 
log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd # # hdfs audit logging # -hdfs.audit.logger=INFO,console +hdfs.audit.logger=INFO,NullAppender hdfs.audit.log.maxfilesize=256MB hdfs.audit.log.maxbackupindex=20 log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger} @@ -141,7 +141,7 @@ log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex} # # mapred audit logging # -mapred.audit.logger=INFO,console +mapred.audit.logger=INFO,NullAppender mapred.audit.log.maxfilesize=256MB mapred.audit.log.maxbackupindex=20 log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger} From f092e9fc8a2b58c755ecfc6828cc3e2af624b90b Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Fri, 11 May 2012 19:25:07 +0000 Subject: [PATCH 65/70] HADOOP-8224. Don't hardcode hdfs.audit.logger in the scripts. Contributed by Tomohiko Kinebuchi git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1337339 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../hadoop-common/src/main/bin/hadoop-daemon.sh | 1 + .../src/main/packages/templates/conf/hadoop-env.sh | 4 ++-- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index b00de53dbaf..cd561606c96 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -316,6 +316,9 @@ Release 2.0.0 - UNRELEASED HADOOP-8353. hadoop-daemon.sh and yarn-daemon.sh can be misleading on stop. (Roman Shaposhnik via atm) + HADOOP-8224. Don't hardcode hdfs.audit.logger in the scripts. + (Tomohiko Kinebuchi via eli) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh index 16e711429ce..80393a59034 100755 --- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh +++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh @@ -109,6 +109,7 @@ fi export HADOOP_LOGFILE=hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.log export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-"INFO,RFA"} export HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-"INFO,RFAS"} +export HDFS_AUDIT_LOGGER=${HDFS_AUDIT_LOGGER:-"INFO,NullAppender"} log=$HADOOP_LOG_DIR/hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.out pid=$HADOOP_PID_DIR/hadoop-$HADOOP_IDENT_STRING-$command.pid HADOOP_STOP_TIMEOUT=${HADOOP_STOP_TIMEOUT:-5} diff --git a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh index a9771832dd0..8fea86388f1 100644 --- a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh +++ b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh @@ -48,10 +48,10 @@ done export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true $HADOOP_CLIENT_OPTS" # Command specific options appended to HADOOP_OPTS when specified -export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=INFO,NullAppender $HADOOP_NAMENODE_OPTS" +export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS" export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS" -export 
HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=INFO,NullAppender $HADOOP_SECONDARYNAMENODE_OPTS" +export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS" # The following applies to multiple commands (fs, dfs, fsck, distcp etc) export HADOOP_CLIENT_OPTS="-Xmx128m $HADOOP_CLIENT_OPTS" From a446ad2c26359beb2b5367195de4257fbae648c6 Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Fri, 11 May 2012 20:26:42 +0000 Subject: [PATCH 66/70] MAPREDUCE-4233. NPE can happen in RMNMNodeInfo. (bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1337363 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 2 + .../hadoop/mapreduce/v2/TestRMNMInfo.java | 43 ++++++++++++++++++- .../yarn/server/resourcemanager/RMNMInfo.java | 10 +++-- 3 files changed, 49 insertions(+), 6 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 0be7073a9a3..ff4664675d4 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -481,6 +481,8 @@ Release 0.23.3 - UNRELEASED MAPREDUCE-4237. TestNodeStatusUpdater can fail if localhost has a domain associated with it (bobby) + MAPREDUCE-4233. NPE can happen in RMNMNodeInfo. (bobby) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestRMNMInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestRMNMInfo.java index d3b47520715..4ee485644d9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestRMNMInfo.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestRMNMInfo.java @@ -21,6 +21,8 @@ package org.apache.hadoop.mapreduce.v2; import java.io.File; import java.io.IOException; import java.util.Iterator; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -28,8 +30,11 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.server.resourcemanager.MockNodes; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.RMNMInfo; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.codehaus.jackson.JsonNode; import org.codehaus.jackson.map.ObjectMapper; @@ -37,6 +42,7 @@ import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; +import static org.mockito.Mockito.*; public class TestRMNMInfo { private static final Log LOG = LogFactory.getLog(TestRMNMInfo.class); @@ -116,14 +122,47 @@ public class TestRMNMInfo { n.get("HealthStatus").getValueAsText().contains("Healthy")); Assert.assertNotNull(n.get("LastHealthUpdate")); 
Assert.assertNotNull(n.get("HealthReport")); - Assert.assertNotNull(n.get("NumContainersMB")); + Assert.assertNotNull(n.get("NumContainers")); Assert.assertEquals( n.get("NodeId") + ": Unexpected number of used containers", - 0, n.get("NumContainersMB").getValueAsInt()); + 0, n.get("NumContainers").getValueAsInt()); Assert.assertEquals( n.get("NodeId") + ": Unexpected amount of used memory", 0, n.get("UsedMemoryMB").getValueAsInt()); Assert.assertNotNull(n.get("AvailableMemoryMB")); } } + + @Test + public void testRMNMInfoMissmatch() throws Exception { + RMContext rmc = mock(RMContext.class); + ResourceScheduler rms = mock(ResourceScheduler.class); + ConcurrentMap map = new ConcurrentHashMap(); + RMNode node = MockNodes.newNodeInfo(1, MockNodes.newResource(4 * 1024)); + map.put(node.getNodeID(), node); + when(rmc.getRMNodes()).thenReturn(map); + + RMNMInfo rmInfo = new RMNMInfo(rmc,rms); + String liveNMs = rmInfo.getLiveNodeManagers(); + ObjectMapper mapper = new ObjectMapper(); + JsonNode jn = mapper.readTree(liveNMs); + Assert.assertEquals("Unexpected number of live nodes:", + 1, jn.size()); + Iterator it = jn.iterator(); + while (it.hasNext()) { + JsonNode n = it.next(); + Assert.assertNotNull(n.get("HostName")); + Assert.assertNotNull(n.get("Rack")); + Assert.assertTrue("Node " + n.get("NodeId") + " should be RUNNING", + n.get("State").getValueAsText().contains("RUNNING")); + Assert.assertNotNull(n.get("NodeHTTPAddress")); + Assert.assertTrue("Node " + n.get("NodeId") + " should be Healthy", + n.get("HealthStatus").getValueAsText().contains("Healthy")); + Assert.assertNotNull(n.get("LastHealthUpdate")); + Assert.assertNotNull(n.get("HealthReport")); + Assert.assertNull(n.get("NumContainers")); + Assert.assertNull(n.get("UsedMemoryMB")); + Assert.assertNull(n.get("AvailableMemoryMB")); + } + } } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java index 34d203578f3..0db42e40ec0 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java @@ -93,10 +93,12 @@ public class RMNMInfo implements RMNMInfoBeans { ni.getNodeHealthStatus().getLastHealthReportTime()); info.put("HealthReport", ni.getNodeHealthStatus().getHealthReport()); - info.put("NumContainersMB", report.getNumContainers()); - info.put("UsedMemoryMB", report.getUsedResource().getMemory()); - info.put("AvailableMemoryMB", - report.getAvailableResource().getMemory()); + if(report != null) { + info.put("NumContainers", report.getNumContainers()); + info.put("UsedMemoryMB", report.getUsedResource().getMemory()); + info.put("AvailableMemoryMB", + report.getAvailableResource().getMemory()); + } nodesInfo.add(info); } From 4dfc8e60c296412d4008c1f072dec00626b4ba15 Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Fri, 11 May 2012 23:36:09 +0000 Subject: [PATCH 67/70] Add HADOOP-8113 to CHANGES.txt git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1337415 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ 1 
file changed, 3 insertions(+) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index cd561606c96..4bf1758716c 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -319,6 +319,9 @@ Release 2.0.0 - UNRELEASED HADOOP-8224. Don't hardcode hdfs.audit.logger in the scripts. (Tomohiko Kinebuchi via eli) + HADOOP-8113. Correction to BUILDING.txt: HDFS needs ProtocolBuffer, too + (not just MapReduce). Contributed by Eugene Koontz. + OPTIMIZATIONS BUG FIXES From 7c3dd7e3118c2b16618222b25eaa3b0e6e0e5dcf Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Sat, 12 May 2012 01:55:46 +0000 Subject: [PATCH 68/70] Move HADOOP-8285 and HADOOP-8366 to 2.0.0 in CHANGES.txt. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1337431 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 4bf1758716c..28603461f72 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -63,12 +63,8 @@ Trunk (unreleased changes) HADOOP-8290. Remove remaining references to hadoop.native.lib (harsh) - HADOOP-8285 Use ProtoBuf for RpcPayLoadHeader (sanjay radia) - HADOOP-8308. Support cross-project Jenkins builds. (tomwhite) - HADOOP-8366 Use ProtoBuf for RpcResponseHeader (sanjay radia) - BUG FIXES HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName. @@ -322,6 +318,10 @@ Release 2.0.0 - UNRELEASED HADOOP-8113. Correction to BUILDING.txt: HDFS needs ProtocolBuffer, too (not just MapReduce). Contributed by Eugene Koontz. + HADOOP-8285 Use ProtoBuf for RpcPayLoadHeader (sanjay radia) + + HADOOP-8366 Use ProtoBuf for RpcResponseHeader (sanjay radia) + OPTIMIZATIONS BUG FIXES From 3d1838228541b22e14f39a1b33d4b5679d74b40c Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Sat, 12 May 2012 01:57:50 +0000 Subject: [PATCH 69/70] Move HDFS-3211 to 2.0.0 in CHANGES.txt. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1337432 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 3dc9b698ebc..f9ee22a6c8b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -368,9 +368,6 @@ Release 2.0.0 - UNRELEASED HDFS-2505. Add a test to verify getFileChecksum(..) with ViewFS. (Ravi Prakash via szetszwo) - HDFS-3211. Add fence(..) and replace NamenodeRegistration with JournalInfo - and epoch in JournalProtocol. (suresh via szetszwo) - HDFS-3240. Drop log level of "heartbeat: ..." in BPServiceActor to DEBUG (todd) @@ -454,6 +451,9 @@ Release 2.0.0 - UNRELEASED HDFS-3400. DNs should be able start with jsvc even if security is disabled. (atm via eli) + HDFS-3211. Add fence(..) and replace NamenodeRegistration with JournalInfo + and epoch in JournalProtocol. (suresh via szetszwo) + OPTIMIZATIONS HDFS-3024. Improve performance of stringification in addStoredBlock (todd) From 810ae618fd2b308bc65a3264a90233658010380e Mon Sep 17 00:00:00 2001 From: Harsh J Date: Sat, 12 May 2012 06:03:39 +0000 Subject: [PATCH 70/70] HADOOP-8395. 
Text shell command unnecessarily demands that a SequenceFile's key class be WritableComparable (harsh) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1337449 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../src/main/java/org/apache/hadoop/fs/shell/Display.java | 5 ++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 28603461f72..018bef311a0 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -133,6 +133,9 @@ Trunk (unreleased changes) HADOOP-8375. test-patch should stop immediately once it has found compilation errors (bobby) + HADOOP-8395. Text shell command unnecessarily demands that a + SequenceFile's key class be WritableComparable (harsh) + OPTIMIZATIONS HADOOP-7761. Improve the performance of raw comparisons. (todd) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java index 8a05a55310e..59358632a77 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java @@ -34,7 +34,6 @@ import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.SequenceFile; import org.apache.hadoop.io.Writable; -import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.io.compress.CompressionCodecFactory; import org.apache.hadoop.util.ReflectionUtils; @@ -136,7 +135,7 @@ class Display extends FsCommand { protected class TextRecordInputStream extends InputStream { SequenceFile.Reader r; - WritableComparable key; + Writable key; Writable val; DataInputBuffer inbuf; @@ -148,7 +147,7 @@ class Display extends FsCommand { r = new SequenceFile.Reader(lconf, SequenceFile.Reader.file(fpath)); key = ReflectionUtils.newInstance( - r.getKeyClass().asSubclass(WritableComparable.class), lconf); + r.getKeyClass().asSubclass(Writable.class), lconf); val = ReflectionUtils.newInstance( r.getValueClass().asSubclass(Writable.class), lconf); inbuf = new DataInputBuffer();
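For illustration only (not part of the commit above; the class name is hypothetical): a minimal sketch of a SequenceFile key type that implements Writable but not WritableComparable. Before HADOOP-8395, the asSubclass(WritableComparable.class) call in Display.TextRecordInputStream rejected files keyed by such a class; with the change above, the text shell command only needs the key to be a Writable.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Writable;

/** Hypothetical key class: a Writable that is deliberately not WritableComparable. */
public class EventKey implements Writable {
  private long timestamp;
  private String source = "";

  @Override
  public void write(DataOutput out) throws IOException {
    out.writeLong(timestamp);
    out.writeUTF(source);
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    timestamp = in.readLong();
    source = in.readUTF();
  }

  @Override
  public String toString() {
    // human-readable form; the text command is expected to rely on toString()
    return timestamp + "\t" + source;
  }
}

A SequenceFile whose key class is EventKey could then be displayed with the text shell command, since ReflectionUtils only needs to instantiate the key as a Writable.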