From 05b6dc647ec8bac1885dc6b89144719c653387d3 Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Mon, 5 Nov 2012 15:43:54 +0000 Subject: [PATCH 01/14] Localization update git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1405846 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/hadoop/yarn/util/FSDownload.java | 89 ++++++++++++++++++- .../hadoop/yarn/util/TestFSDownload.java | 79 ++++++++++------ 2 files changed, 142 insertions(+), 26 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java index 4d69056a70f..627d565d4d0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java @@ -37,6 +37,7 @@ import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.Path; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.util.RunJar; import org.apache.hadoop.yarn.api.records.LocalResource; @@ -90,6 +91,85 @@ private void createDir(Path path, FsPermission perm) throws IOException { } } + /** + * Returns a boolean to denote whether a cache file is visible to all(public) + * or not + * @param conf + * @param uri + * @return true if the path in the uri is visible to all, false otherwise + * @throws IOException + */ + private static boolean isPublic(FileSystem fs, Path current) throws IOException { + current = fs.makeQualified(current); + //the leaf level file should be readable by others + if (!checkPublicPermsForAll(fs, current, FsAction.READ_EXECUTE, FsAction.READ)) { + return false; + } + return ancestorsHaveExecutePermissions(fs, current.getParent()); + } + + private static boolean checkPublicPermsForAll(FileSystem fs, Path current, + FsAction dir, FsAction file) + throws IOException { + return checkPublicPermsForAll(fs, fs.getFileStatus(current), dir, file); + } + + private static boolean checkPublicPermsForAll(FileSystem fs, + FileStatus status, FsAction dir, FsAction file) + throws IOException { + FsPermission perms = status.getPermission(); + FsAction otherAction = perms.getOtherAction(); + if (status.isDirectory()) { + if (!otherAction.implies(dir)) { + return false; + } + + for (FileStatus child : fs.listStatus(status.getPath())) { + if(!checkPublicPermsForAll(fs, child, dir, file)) { + return false; + } + } + return true; + } + return (otherAction.implies(file)); + } + + /** + * Returns true if all ancestors of the specified path have the 'execute' + * permission set for all users (i.e. 
that other users can traverse + * the directory heirarchy to the given path) + */ + private static boolean ancestorsHaveExecutePermissions(FileSystem fs, Path path) + throws IOException { + Path current = path; + while (current != null) { + //the subdirs in the path should have execute permissions for others + if (!checkPermissionOfOther(fs, current, FsAction.EXECUTE)) { + return false; + } + current = current.getParent(); + } + return true; + } + + /** + * Checks for a given path whether the Other permissions on it + * imply the permission in the passed FsAction + * @param fs + * @param path + * @param action + * @return true if the path in the uri is visible to all, false otherwise + * @throws IOException + */ + private static boolean checkPermissionOfOther(FileSystem fs, Path path, + FsAction action) throws IOException { + FileStatus status = fs.getFileStatus(path); + FsPermission perms = status.getPermission(); + FsAction otherAction = perms.getOtherAction(); + return otherAction.implies(action); + } + + private Path copy(Path sCopy, Path dstdir) throws IOException { FileSystem sourceFs = sCopy.getFileSystem(conf); Path dCopy = new Path(dstdir, sCopy.getName() + ".tmp"); @@ -99,7 +179,14 @@ private Path copy(Path sCopy, Path dstdir) throws IOException { " changed on src filesystem (expected " + resource.getTimestamp() + ", was " + sStat.getModificationTime()); } - + if (resource.getVisibility() == LocalResourceVisibility.PUBLIC) { + if (!isPublic(sourceFs, sCopy)) { + throw new IOException("Resource " + sCopy + + " is not publicly accessable and as such cannot be part of the" + + " public cache."); + } + } + sourceFs.copyToLocalFile(sCopy, dCopy); return dCopy; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestFSDownload.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestFSDownload.java index 25adf317016..c29b69f1148 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestFSDownload.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestFSDownload.java @@ -113,6 +113,54 @@ static LocalResource createJar(FileContext files, Path p, return ret; } + @Test + public void testDownloadBadPublic() throws IOException, URISyntaxException, + InterruptedException { + Configuration conf = new Configuration(); + conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077"); + FileContext files = FileContext.getLocalFSFileContext(conf); + final Path basedir = files.makeQualified(new Path("target", + TestFSDownload.class.getSimpleName())); + files.mkdir(basedir, null, true); + conf.setStrings(TestFSDownload.class.getName(), basedir.toString()); + + Map rsrcVis = + new HashMap(); + + Random rand = new Random(); + long sharedSeed = rand.nextLong(); + rand.setSeed(sharedSeed); + System.out.println("SEED: " + sharedSeed); + + Map> pending = + new HashMap>(); + ExecutorService exec = Executors.newSingleThreadExecutor(); + LocalDirAllocator dirs = + new LocalDirAllocator(TestFSDownload.class.getName()); + int size = 512; + LocalResourceVisibility vis = LocalResourceVisibility.PUBLIC; + Path path = new Path(basedir, "test-file"); + LocalResource rsrc = createFile(files, path, size, rand, vis); + rsrcVis.put(rsrc, vis); + Path destPath = dirs.getLocalPathForWrite( + basedir.toString(), size, conf); + FSDownload fsd = + new FSDownload(files, UserGroupInformation.getCurrentUser(), conf, + 
destPath, rsrc, new Random(sharedSeed)); + pending.put(rsrc, exec.submit(fsd)); + + try { + for (Map.Entry> p : pending.entrySet()) { + p.getValue().get(); + Assert.fail("We localized a file that is not public."); + } + } catch (ExecutionException e) { + Assert.assertTrue(e.getCause() instanceof IOException); + } finally { + exec.shutdown(); + } + } + @Test public void testDownload() throws IOException, URISyntaxException, InterruptedException { @@ -140,14 +188,9 @@ public void testDownload() throws IOException, URISyntaxException, int[] sizes = new int[10]; for (int i = 0; i < 10; ++i) { sizes[i] = rand.nextInt(512) + 512; - LocalResourceVisibility vis = LocalResourceVisibility.PUBLIC; - switch (i%3) { - case 1: - vis = LocalResourceVisibility.PRIVATE; - break; - case 2: + LocalResourceVisibility vis = LocalResourceVisibility.PRIVATE; + if (i%2 == 1) { vis = LocalResourceVisibility.APPLICATION; - break; } Path p = new Path(basedir, "" + i); LocalResource rsrc = createFile(files, p, sizes[i], rand, vis); @@ -176,17 +219,8 @@ public void testDownload() throws IOException, URISyntaxException, System.out.println("File permission " + perm + " for rsrc vis " + p.getKey().getVisibility().name()); assert(rsrcVis.containsKey(p.getKey())); - switch (rsrcVis.get(p.getKey())) { - case PUBLIC: - Assert.assertTrue("Public file should be 555", - perm.toShort() == FSDownload.PUBLIC_FILE_PERMS.toShort()); - break; - case PRIVATE: - case APPLICATION: - Assert.assertTrue("Private file should be 500", - perm.toShort() == FSDownload.PRIVATE_FILE_PERMS.toShort()); - break; - } + Assert.assertTrue("Private file should be 500", + perm.toShort() == FSDownload.PRIVATE_FILE_PERMS.toShort()); } } catch (ExecutionException e) { throw new IOException("Failed exec", e); @@ -250,14 +284,9 @@ public void testDirDownload() throws IOException, InterruptedException { LocalDirAllocator dirs = new LocalDirAllocator(TestFSDownload.class.getName()); for (int i = 0; i < 5; ++i) { - LocalResourceVisibility vis = LocalResourceVisibility.PUBLIC; - switch (rand.nextInt()%3) { - case 1: - vis = LocalResourceVisibility.PRIVATE; - break; - case 2: + LocalResourceVisibility vis = LocalResourceVisibility.PRIVATE; + if (i%2 == 1) { vis = LocalResourceVisibility.APPLICATION; - break; } Path p = new Path(basedir, "dir" + i + ".jar"); From 8303175db8e5b78ddb09005654cf1bc1a2d82037 Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Mon, 5 Nov 2012 18:26:49 +0000 Subject: [PATCH 02/14] HADOOP-9009. Add SecurityUtil methods to get/set authentication method (daryn via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1405904 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 + .../apache/hadoop/security/SecurityUtil.java | 21 +++++++ .../hadoop/security/UserGroupInformation.java | 8 +-- .../apache/hadoop/ipc/MiniRPCBenchmark.java | 5 +- .../java/org/apache/hadoop/ipc/TestRPC.java | 25 +++++++-- .../org/apache/hadoop/ipc/TestSaslRPC.java | 28 +++++----- .../security/TestDoAsEffectiveUser.java | 8 +-- .../hadoop/security/TestSecurityUtil.java | 56 +++++++++++++++++-- .../security/TestUGIWithSecurityOn.java | 4 +- 9 files changed, 119 insertions(+), 39 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index f1fbb54b382..846aef729d9 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -333,6 +333,9 @@ Release 2.0.3-alpha - Unreleased HADOOP-8985. 
Add namespace declarations in .proto files for languages other than java. (Binglin Chan via suresh) + HADOOP-9009. Add SecurityUtil methods to get/set authentication method + (daryn via bobby) + OPTIMIZATIONS HADOOP-8866. SampleQuantiles#query is O(N^2) instead of O(N). (Andrew Wang diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java index 66ffe20c6ba..717c54d713b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java @@ -16,6 +16,8 @@ */ package org.apache.hadoop.security; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; + import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; @@ -44,6 +46,7 @@ import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.io.Text; import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.authentication.client.AuthenticatedURL; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.ssl.SSLFactory; @@ -665,4 +668,22 @@ void setSearchDomains(String ... domains) { } } + public static AuthenticationMethod getAuthenticationMethod(Configuration conf) { + String value = conf.get(HADOOP_SECURITY_AUTHENTICATION, "simple"); + try { + return Enum.valueOf(AuthenticationMethod.class, value.toUpperCase()); + } catch (IllegalArgumentException iae) { + throw new IllegalArgumentException("Invalid attribute value for " + + HADOOP_SECURITY_AUTHENTICATION + " of " + value); + } + } + + public static void setAuthenticationMethod( + AuthenticationMethod authenticationMethod, Configuration conf) { + if (authenticationMethod == null) { + authenticationMethod = AuthenticationMethod.SIMPLE; + } + conf.set(HADOOP_SECURITY_AUTHENTICATION, + authenticationMethod.toString().toLowerCase()); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java index 7c3f6677b4f..4b9545032ad 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java @@ -236,15 +236,15 @@ private static synchronized void initialize(Configuration conf, boolean skipRule * @param conf the configuration to use */ private static synchronized void initUGI(Configuration conf) { - String value = conf.get(HADOOP_SECURITY_AUTHENTICATION); - if (value == null || "simple".equals(value)) { + AuthenticationMethod auth = SecurityUtil.getAuthenticationMethod(conf); + if (auth == AuthenticationMethod.SIMPLE) { useKerberos = false; - } else if ("kerberos".equals(value)) { + } else if (auth == AuthenticationMethod.KERBEROS) { useKerberos = true; } else { throw new IllegalArgumentException("Invalid attribute value for " + HADOOP_SECURITY_AUTHENTICATION + - " of " + value); + " of " + auth); } try { kerberosMinSecondsBeforeRelogin = 1000L * conf.getLong( diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java index 5130bad1a69..edf95d74809 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java @@ -30,7 +30,6 @@ import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.io.Text; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.KerberosInfo; @@ -380,9 +379,7 @@ public static void main(String[] args) throws Exception { elapsedTime = mb.runMiniBenchmarkWithDelegationToken( conf, count, KEYTAB_FILE_KEY, USER_NAME_KEY); } else { - String auth = - conf.get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, - "simple"); + String auth = SecurityUtil.getAuthenticationMethod(conf).toString(); System.out.println( "Running MiniRPCBenchmark with " + auth + " authentication."); elapsedTime = mb.runMiniBenchmark( diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java index 732431d2a18..745eb792842 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java @@ -55,13 +55,16 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.authorize.AuthorizationException; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.security.authorize.Service; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.test.MockitoUtil; +import org.junit.Before; import org.junit.Test; import com.google.protobuf.DescriptorProtos; @@ -75,11 +78,14 @@ public class TestRPC { public static final Log LOG = LogFactory.getLog(TestRPC.class); - private static Configuration conf = new Configuration(); + private static Configuration conf; - static { + @Before + public void setupConf() { + conf = new Configuration(); conf.setClass("rpc.engine." 
+ StoppedProtocol.class.getName(), StoppedRpcEngine.class, RpcEngine.class); + UserGroupInformation.setConfiguration(conf); } int datasize = 1024*100; @@ -676,11 +682,17 @@ public void testWrappedStopProxy() throws IOException { @Test public void testErrorMsgForInsecureClient() throws Exception { - final Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class) + Configuration serverConf = new Configuration(conf); + SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, + serverConf); + UserGroupInformation.setConfiguration(serverConf); + + final Server server = new RPC.Builder(serverConf).setProtocol(TestProtocol.class) .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0) .setNumHandlers(5).setVerbose(true).build(); - server.enableSecurity(); server.start(); + + UserGroupInformation.setConfiguration(conf); boolean succeeded = false; final InetSocketAddress addr = NetUtils.getConnectAddress(server); TestProtocol proxy = null; @@ -702,17 +714,18 @@ public void testErrorMsgForInsecureClient() throws Exception { conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY, 2); - final Server multiServer = new RPC.Builder(conf) + UserGroupInformation.setConfiguration(serverConf); + final Server multiServer = new RPC.Builder(serverConf) .setProtocol(TestProtocol.class).setInstance(new TestImpl()) .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true) .build(); - multiServer.enableSecurity(); multiServer.start(); succeeded = false; final InetSocketAddress mulitServerAddr = NetUtils.getConnectAddress(multiServer); proxy = null; try { + UserGroupInformation.setConfiguration(conf); proxy = (TestProtocol) RPC.getProxy(TestProtocol.class, TestProtocol.versionID, mulitServerAddr, conf); proxy.echo(""); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java index f1b5fd67fd3..6a4684e7884 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java @@ -18,8 +18,9 @@ package org.apache.hadoop.ipc; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; +import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.*; import static org.junit.Assert.*; + import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; @@ -78,7 +79,7 @@ public class TestSaslRPC { @BeforeClass public static void setup() { conf = new Configuration(); - conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos"); + SecurityUtil.setAuthenticationMethod(KERBEROS, conf); UserGroupInformation.setConfiguration(conf); } @@ -263,7 +264,6 @@ public void testSecureToInsecureRpc() throws Exception { Server server = new RPC.Builder(conf).setProtocol(TestSaslProtocol.class) .setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0) .setNumHandlers(5).setVerbose(true).build(); - server.disableSecurity(); TestTokenSecretManager sm = new TestTokenSecretManager(); doDigestRpc(server, sm); } @@ -345,7 +345,7 @@ public void testGetRemotePrincipal() throws Exception { new InetSocketAddress(0), TestSaslProtocol.class, null, 0, newConf); assertEquals(SERVER_PRINCIPAL_1, remoteId.getServerPrincipal()); // this following test needs security to be off - newConf.set(HADOOP_SECURITY_AUTHENTICATION, "simple"); + 
SecurityUtil.setAuthenticationMethod(SIMPLE, newConf); UserGroupInformation.setConfiguration(newConf); remoteId = ConnectionId.getConnectionId(new InetSocketAddress(0), TestSaslProtocol.class, null, 0, newConf); @@ -536,15 +536,15 @@ private AuthenticationMethod getAuthMethod(final boolean isSecureClient, final boolean useToken ) throws Exception { + Configuration serverConf = new Configuration(conf); + SecurityUtil.setAuthenticationMethod( + isSecureServer ? KERBEROS : SIMPLE, serverConf); + UserGroupInformation.setConfiguration(serverConf); + TestTokenSecretManager sm = new TestTokenSecretManager(); - Server server = new RPC.Builder(conf).setProtocol(TestSaslProtocol.class) + Server server = new RPC.Builder(serverConf).setProtocol(TestSaslProtocol.class) .setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0) .setNumHandlers(5).setVerbose(true).setSecretManager(sm).build(); - if (isSecureServer) { - server.enableSecurity(); - } else { - server.disableSecurity(); - } server.start(); final UserGroupInformation current = UserGroupInformation.getCurrentUser(); @@ -558,8 +558,10 @@ private AuthenticationMethod getAuthMethod(final boolean isSecureClient, current.addToken(token); } - conf.set(HADOOP_SECURITY_AUTHENTICATION, isSecureClient ? "kerberos" : "simple"); - UserGroupInformation.setConfiguration(conf); + final Configuration clientConf = new Configuration(conf); + SecurityUtil.setAuthenticationMethod( + isSecureClient ? KERBEROS : SIMPLE, clientConf); + UserGroupInformation.setConfiguration(clientConf); try { return current.doAs(new PrivilegedExceptionAction() { @Override @@ -567,7 +569,7 @@ public AuthenticationMethod run() throws IOException { TestSaslProtocol proxy = null; try { proxy = (TestSaslProtocol) RPC.getProxy(TestSaslProtocol.class, - TestSaslProtocol.versionID, addr, conf); + TestSaslProtocol.versionID, addr, clientConf); return proxy.getAuthMethod(); } finally { if (proxy != null) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java index 529124eddf1..129d4a06d0a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java @@ -28,13 +28,13 @@ import junit.framework.Assert; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.ProtocolSignature; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ipc.VersionedProtocol; import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.authorize.ProxyUsers; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenInfo; @@ -416,8 +416,7 @@ public String run() throws IOException { public void testProxyWithToken() throws Exception { final Configuration conf = new Configuration(masterConf); TestTokenSecretManager sm = new TestTokenSecretManager(); - conf - .set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); + SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf); UserGroupInformation.setConfiguration(conf); final Server server = new 
RPC.Builder(conf).setProtocol(TestProtocol.class) .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0) @@ -471,8 +470,7 @@ public String run() throws Exception { public void testTokenBySuperUser() throws Exception { TestTokenSecretManager sm = new TestTokenSecretManager(); final Configuration newConf = new Configuration(masterConf); - newConf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, - "kerberos"); + SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, newConf); UserGroupInformation.setConfiguration(newConf); final Server server = new RPC.Builder(newConf) .setProtocol(TestProtocol.class).setInstance(new TestImpl()) diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java index cc22796ab1f..f7096394133 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java @@ -16,6 +16,8 @@ */ package org.apache.hadoop.security; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; +import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.*; import static org.junit.Assert.*; import java.io.IOException; @@ -29,10 +31,19 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.TokenIdentifier; +import org.junit.BeforeClass; import org.junit.Test; import org.mockito.Mockito; public class TestSecurityUtil { + @BeforeClass + public static void unsetKerberosRealm() { + // prevent failures if kinit-ed or on os x with no realm + System.setProperty("java.security.krb5.kdc", ""); + System.setProperty("java.security.krb5.realm", "NONE"); + } + @Test public void isOriginalTGTReturnsCorrectValues() { assertTrue(SecurityUtil.isTGSPrincipal @@ -111,9 +122,7 @@ public void testLocalHostNameForNullOrWild() throws Exception { @Test public void testStartsWithIncorrectSettings() throws IOException { Configuration conf = new Configuration(); - conf.set( - org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, - "kerberos"); + SecurityUtil.setAuthenticationMethod(KERBEROS, conf); String keyTabKey="key"; conf.set(keyTabKey, ""); UserGroupInformation.setConfiguration(conf); @@ -256,7 +265,7 @@ void runBadPortPermutes(String arg, boolean validIfPosPort) { SecurityUtil.setTokenServiceUseIp(useIp); String serviceHost = useIp ? 
ip : host.toLowerCase(); - Token token = new Token(); + Token token = new Token(); Text service = new Text(serviceHost+":"+port); assertEquals(service, SecurityUtil.buildTokenService(addr)); @@ -345,4 +354,43 @@ public void testSocketAddrWithIPToStaticIP() { NetUtils.addStaticResolution(staticHost, "255.255.255.255"); verifyServiceAddr(staticHost, "255.255.255.255"); } + + @Test + public void testGetAuthenticationMethod() { + Configuration conf = new Configuration(); + // default is simple + conf.unset(HADOOP_SECURITY_AUTHENTICATION); + assertEquals(SIMPLE, SecurityUtil.getAuthenticationMethod(conf)); + // simple + conf.set(HADOOP_SECURITY_AUTHENTICATION, "simple"); + assertEquals(SIMPLE, SecurityUtil.getAuthenticationMethod(conf)); + // kerberos + conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos"); + assertEquals(KERBEROS, SecurityUtil.getAuthenticationMethod(conf)); + // bad value + conf.set(HADOOP_SECURITY_AUTHENTICATION, "kaboom"); + String error = null; + try { + SecurityUtil.getAuthenticationMethod(conf); + } catch (Exception e) { + error = e.toString(); + } + assertEquals("java.lang.IllegalArgumentException: " + + "Invalid attribute value for " + + HADOOP_SECURITY_AUTHENTICATION + " of kaboom", error); + } + + @Test + public void testSetAuthenticationMethod() { + Configuration conf = new Configuration(); + // default + SecurityUtil.setAuthenticationMethod(null, conf); + assertEquals("simple", conf.get(HADOOP_SECURITY_AUTHENTICATION)); + // simple + SecurityUtil.setAuthenticationMethod(SIMPLE, conf); + assertEquals("simple", conf.get(HADOOP_SECURITY_AUTHENTICATION)); + // kerberos + SecurityUtil.setAuthenticationMethod(KERBEROS, conf); + assertEquals("kerberos", conf.get(HADOOP_SECURITY_AUTHENTICATION)); + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithSecurityOn.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithSecurityOn.java index 3dc69783df9..a1585933515 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithSecurityOn.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithSecurityOn.java @@ -21,7 +21,6 @@ import junit.framework.Assert; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.junit.Assume; import org.junit.Before; @@ -49,8 +48,7 @@ public void testLogin() throws IOException { String user1keyTabFilepath = System.getProperty("kdc.resource.dir") + "/keytabs/user1.keytab"; Configuration conf = new Configuration(); - conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, - "kerberos"); + SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf); UserGroupInformation.setConfiguration(conf); UserGroupInformation ugiNn = UserGroupInformation From b1aa62a848646f78e019c74186d9696e9101afcf Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Mon, 5 Nov 2012 18:37:39 +0000 Subject: [PATCH 03/14] HADOOP-9010. 
Map UGI authenticationMethod to RPC authMethod (daryn via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1405910 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 ++ .../java/org/apache/hadoop/ipc/Client.java | 4 +- .../java/org/apache/hadoop/ipc/Server.java | 4 +- .../apache/hadoop/security/SaslRpcServer.java | 12 ++--- .../hadoop/security/UserGroupInformation.java | 45 ++++++++++++++++--- .../security/TestUserGroupInformation.java | 13 +++++- 6 files changed, 62 insertions(+), 19 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 846aef729d9..5cac902f0ea 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -336,6 +336,9 @@ Release 2.0.3-alpha - Unreleased HADOOP-9009. Add SecurityUtil methods to get/set authentication method (daryn via bobby) + HADOOP-9010. Map UGI authenticationMethod to RPC authMethod (daryn via + bobby) + OPTIMIZATIONS HADOOP-8866. SampleQuantiles#query is O(N^2) instead of O(N). (Andrew Wang diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java index ab8d6de8812..aea73b4c385 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java @@ -69,6 +69,7 @@ import org.apache.hadoop.security.SaslRpcServer.AuthMethod; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.TokenInfo; @@ -295,8 +296,9 @@ public Connection(ConnectionId remoteId) throws IOException { } if (token != null) { - authMethod = AuthMethod.DIGEST; + authMethod = AuthenticationMethod.TOKEN.getAuthMethod(); } else if (UserGroupInformation.isSecurityEnabled()) { + // eventually just use the ticket's authMethod authMethod = AuthMethod.KERBEROS; } else { authMethod = AuthMethod.SIMPLE; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index 0c4670431b9..efaf6028ea9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -1526,11 +1526,11 @@ private void processConnectionContext(byte[] buf) throws IOException { if (!useSasl) { user = protocolUser; if (user != null) { - user.setAuthenticationMethod(AuthMethod.SIMPLE.authenticationMethod); + user.setAuthenticationMethod(AuthMethod.SIMPLE); } } else { // user is authenticated - user.setAuthenticationMethod(authMethod.authenticationMethod); + user.setAuthenticationMethod(authMethod); //Now we check if this is a proxy user case. If the protocol user is //different from the 'user', it is a proxy user scenario. However, //this is not allowed if user authenticated with DIGEST. 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java index 31718628f22..31b4c35dae2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java @@ -42,7 +42,6 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ipc.Server; -import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.SecretManager.InvalidToken; @@ -137,20 +136,17 @@ private SaslStatus(int state) { /** Authentication method */ @InterfaceStability.Evolving public static enum AuthMethod { - SIMPLE((byte) 80, "", AuthenticationMethod.SIMPLE), - KERBEROS((byte) 81, "GSSAPI", AuthenticationMethod.KERBEROS), - DIGEST((byte) 82, "DIGEST-MD5", AuthenticationMethod.TOKEN); + SIMPLE((byte) 80, ""), + KERBEROS((byte) 81, "GSSAPI"), + DIGEST((byte) 82, "DIGEST-MD5"); /** The code for this method. */ public final byte code; public final String mechanismName; - public final AuthenticationMethod authenticationMethod; - private AuthMethod(byte code, String mechanismName, - AuthenticationMethod authMethod) { + private AuthMethod(byte code, String mechanismName) { this.code = code; this.mechanismName = mechanismName; - this.authenticationMethod = authMethod; } private static final int FIRST_CODE = values()[0].code; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java index 4b9545032ad..88f82912025 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java @@ -59,6 +59,7 @@ import org.apache.hadoop.metrics2.annotation.Metrics; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MutableRate; +import org.apache.hadoop.security.SaslRpcServer.AuthMethod; import org.apache.hadoop.security.authentication.util.KerberosName; import org.apache.hadoop.security.authentication.util.KerberosUtil; import org.apache.hadoop.security.token.Token; @@ -1019,13 +1020,34 @@ public static UserGroupInformation createRemoteUser(String user) { @InterfaceAudience.Public @InterfaceStability.Evolving public static enum AuthenticationMethod { - SIMPLE, - KERBEROS, - TOKEN, - CERTIFICATE, - KERBEROS_SSL, - PROXY; - } + // currently we support only one auth per method, but eventually a + // subtype is needed to differentiate, ex. 
if digest is token or ldap + SIMPLE(AuthMethod.SIMPLE), + KERBEROS(AuthMethod.KERBEROS), + TOKEN(AuthMethod.DIGEST), + CERTIFICATE(null), + KERBEROS_SSL(null), + PROXY(null); + + private final AuthMethod authMethod; + private AuthenticationMethod(AuthMethod authMethod) { + this.authMethod = authMethod; + } + + public AuthMethod getAuthMethod() { + return authMethod; + } + + public static AuthenticationMethod valueOf(AuthMethod authMethod) { + for (AuthenticationMethod value : values()) { + if (value.getAuthMethod() == authMethod) { + return value; + } + } + throw new IllegalArgumentException( + "no authentication method for " + authMethod); + } + }; /** * Create a proxy user using username of the effective user and the ugi of the @@ -1290,6 +1312,15 @@ void setAuthenticationMethod(AuthenticationMethod authMethod) { user.setAuthenticationMethod(authMethod); } + /** + * Sets the authentication method in the subject + * + * @param authMethod + */ + public void setAuthenticationMethod(AuthMethod authMethod) { + user.setAuthenticationMethod(AuthenticationMethod.valueOf(authMethod)); + } + /** * Get the authentication method from the subject * diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java index 9aebd4ea652..2acbceffd2d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java @@ -305,7 +305,6 @@ public void testAddCreds() throws Exception { assertSame(secret, ugi.getCredentials().getSecretKey(secretKey)); } - @SuppressWarnings("unchecked") // from Mockito mocks @Test public void testGetCredsNotSame() throws Exception { @@ -429,6 +428,18 @@ public Collection run() throws IOException { assertEquals(2, otherSet.size()); } + @Test + public void testTestAuthMethod() throws Exception { + UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); + // verify the reverse mappings works + for (AuthenticationMethod am : AuthenticationMethod.values()) { + if (am.getAuthMethod() != null) { + ugi.setAuthenticationMethod(am.getAuthMethod()); + assertEquals(am, ugi.getAuthenticationMethod()); + } + } + } + @Test public void testUGIAuthMethod() throws Exception { final UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); From 0eadfcfcf55bbc616cfba36988c78a8d46814624 Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Mon, 5 Nov 2012 21:52:06 +0000 Subject: [PATCH 04/14] MAPREDUCE-4771. KeyFieldBasedPartitioner not partitioning properly when configured (jlowe via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1405975 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 +++ .../partition/KeyFieldBasedPartitioner.java | 1 + .../lib/TestKeyFieldBasedPartitioner.java | 23 +++++++++++++++---- 3 files changed, 23 insertions(+), 4 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index e7e7ef12193..644796d4bd8 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -631,6 +631,9 @@ Release 0.23.5 - UNRELEASED MAPREDUCE-4763 repair test TestUmbilicalProtocolWithJobToken (Ivan A. Veselovsky via bobby) + + MAPREDUCE-4771. 
KeyFieldBasedPartitioner not partitioning properly when + configured (jlowe via bobby) Release 0.23.4 - UNRELEASED diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedPartitioner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedPartitioner.java index 0927c1a1766..44cb624c5a8 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedPartitioner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedPartitioner.java @@ -63,6 +63,7 @@ public class KeyFieldBasedPartitioner extends Partitioner public void setConf(Configuration conf) { this.conf = conf; + keyFieldHelper = new KeyFieldHelper(); String keyFieldSeparator = conf.get(MRJobConfig.MAP_OUTPUT_KEY_FIELD_SEPERATOR, "\t"); keyFieldHelper.setKeyFieldSeparator(keyFieldSeparator); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/TestKeyFieldBasedPartitioner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/TestKeyFieldBasedPartitioner.java index 2ac80d2bba0..02b0507742a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/TestKeyFieldBasedPartitioner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/TestKeyFieldBasedPartitioner.java @@ -17,17 +17,18 @@ */ package org.apache.hadoop.mapred.lib; +import static org.junit.Assert.assertEquals; + import org.apache.hadoop.io.Text; import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner; +import org.junit.Test; -import junit.framework.TestCase; - -public class TestKeyFieldBasedPartitioner extends TestCase { +public class TestKeyFieldBasedPartitioner { /** * Test is key-field-based partitioned works with empty key. */ + @Test public void testEmptyKey() throws Exception { KeyFieldBasedPartitioner kfbp = new KeyFieldBasedPartitioner(); @@ -37,4 +38,18 @@ public void testEmptyKey() throws Exception { assertEquals("Empty key should map to 0th partition", 0, kfbp.getPartition(new Text(), new Text(), 10)); } + + @Test + public void testMultiConfigure() { + KeyFieldBasedPartitioner kfbp = + new KeyFieldBasedPartitioner(); + JobConf conf = new JobConf(); + conf.set(KeyFieldBasedPartitioner.PARTITIONER_OPTIONS, "-k1,1"); + kfbp.setConf(conf); + Text key = new Text("foo\tbar"); + Text val = new Text("val"); + int partNum = kfbp.getPartition(key, val, 4096); + kfbp.configure(conf); + assertEquals(partNum, kfbp.getPartition(key,val, 4096)); + } } \ No newline at end of file From 7ee5ce3176a74d217551b5981f809a56c719424b Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Mon, 5 Nov 2012 23:26:34 +0000 Subject: [PATCH 05/14] HDFS-4151. Change the methods in FSDirectory to pass INodesInPath instead of INode[] as a parameter. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1406006 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 5 +- .../hdfs/server/namenode/FSDirectory.java | 198 ++++++++---------- .../hdfs/server/namenode/FSNamesystem.java | 34 ++- .../server/namenode/FSPermissionChecker.java | 2 +- .../hdfs/server/namenode/INodeDirectory.java | 12 +- 5 files changed, 114 insertions(+), 137 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 297f8f8feb2..fdf1ba2cc55 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -146,11 +146,14 @@ Trunk (Unreleased) HDFS-4110. Refine a log printed in JNStorage. (Liang Xie via suresh) HDFS-4124. Refactor INodeDirectory#getExistingPathINodes() to enable - returningmore than INode array. (Jing Zhao via suresh) + returning more than INode array. (Jing Zhao via suresh) HDFS-4129. Add utility methods to dump NameNode in memory tree for testing. (szetszwo via suresh) + HDFS-4151. Change the methods in FSDirectory to pass INodesInPath instead + of INode[] as a parameter. (szetszwo) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 8b17d4608f1..03d9e4387f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -330,22 +330,18 @@ INodeDirectory addToParent(byte[] src, INodeDirectory parentINode, /** * Add a block to the file. Returns a reference to the added block. 
*/ - BlockInfo addBlock(String path, - INode[] inodes, - Block block, - DatanodeDescriptor targets[] - ) throws QuotaExceededException { + BlockInfo addBlock(String path, INodesInPath inodesInPath, Block block, + DatanodeDescriptor targets[]) throws IOException { waitForReady(); writeLock(); try { - assert inodes[inodes.length-1].isUnderConstruction() : - "INode should correspond to a file under construction"; - INodeFileUnderConstruction fileINode = - (INodeFileUnderConstruction)inodes[inodes.length-1]; + final INode[] inodes = inodesInPath.getINodes(); + final INodeFileUnderConstruction fileINode = + INodeFileUnderConstruction.valueOf(inodes[inodes.length-1], path); // check quota limits and updated space consumed - updateCount(inodes, inodes.length-1, 0, + updateCount(inodesInPath, inodes.length-1, 0, fileINode.getPreferredBlockSize()*fileINode.getBlockReplication(), true); // associate new last block for the file @@ -441,8 +437,9 @@ void unprotectedRemoveBlock(String path, INodeFileUnderConstruction fileNode, } // update space consumed - INode[] pathINodes = getExistingPathINodes(path); - updateCount(pathINodes, pathINodes.length-1, 0, + final INodesInPath inodesInPath = rootDir.getExistingPathINodes(path, true); + final INode[] inodes = inodesInPath.getINodes(); + updateCount(inodesInPath, inodes.length-1, 0, -fileNode.getPreferredBlockSize()*fileNode.getBlockReplication(), true); } @@ -510,7 +507,8 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp) throws QuotaExceededException, UnresolvedLinkException, FileAlreadyExistsException { assert hasWriteLock(); - INode[] srcInodes = rootDir.getExistingPathINodes(src, false); + INodesInPath srcInodesInPath = rootDir.getExistingPathINodes(src, false); + INode[] srcInodes = srcInodesInPath.getINodes(); INode srcInode = srcInodes[srcInodes.length-1]; // check the validation of the source @@ -573,7 +571,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp) String srcChildName = null; try { // remove src - srcChild = removeChild(srcInodes, srcInodes.length-1); + srcChild = removeChild(srcInodesInPath, srcInodes.length-1); if (srcChild == null) { NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + "failed to rename " + src + " to " + dst @@ -584,7 +582,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp) srcChild.setLocalName(dstComponents[dstInodes.length-1]); // add src to the destination - dstChild = addChildNoQuotaCheck(dstInodes, dstInodes.length - 1, + dstChild = addChildNoQuotaCheck(dstInodesInPath, dstInodes.length-1, srcChild, UNKNOWN_DISK_SPACE); if (dstChild != null) { srcChild = null; @@ -601,7 +599,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp) if (dstChild == null && srcChild != null) { // put it back srcChild.setLocalName(srcChildName); - addChildNoQuotaCheck(srcInodes, srcInodes.length - 1, srcChild, + addChildNoQuotaCheck(srcInodesInPath, srcInodes.length - 1, srcChild, UNKNOWN_DISK_SPACE); } } @@ -634,7 +632,8 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp, } } String error = null; - final INode[] srcInodes = rootDir.getExistingPathINodes(src, false); + final INodesInPath srcInodesInPath = rootDir.getExistingPathINodes(src, false); + final INode[] srcInodes = srcInodesInPath.getINodes(); final INode srcInode = srcInodes[srcInodes.length - 1]; // validate source if (srcInode == null) { @@ -720,7 +719,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp, // Ensure dst has quota to 
accommodate rename verifyQuotaForRename(srcInodes, dstInodes); - INode removedSrc = removeChild(srcInodes, srcInodes.length - 1); + INode removedSrc = removeChild(srcInodesInPath, srcInodes.length - 1); if (removedSrc == null) { error = "Failed to rename " + src + " to " + dst + " because the source can not be removed"; @@ -733,14 +732,14 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp, INode removedDst = null; try { if (dstInode != null) { // dst exists remove it - removedDst = removeChild(dstInodes, dstInodes.length - 1); + removedDst = removeChild(dstInodesInPath, dstInodes.length - 1); dstChildName = removedDst.getLocalName(); } INode dstChild = null; removedSrc.setLocalName(dstComponents[dstInodes.length - 1]); // add src as dst to complete rename - dstChild = addChildNoQuotaCheck(dstInodes, dstInodes.length - 1, + dstChild = addChildNoQuotaCheck(dstInodesInPath, dstInodes.length - 1, removedSrc, UNKNOWN_DISK_SPACE); int filesDeleted = 0; @@ -768,13 +767,13 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp, if (removedSrc != null) { // Rename failed - restore src removedSrc.setLocalName(srcChildName); - addChildNoQuotaCheck(srcInodes, srcInodes.length - 1, removedSrc, + addChildNoQuotaCheck(srcInodesInPath, srcInodes.length - 1, removedSrc, UNKNOWN_DISK_SPACE); } if (removedDst != null) { // Rename failed - restore dst removedDst.setLocalName(dstChildName); - addChildNoQuotaCheck(dstInodes, dstInodes.length - 1, removedDst, + addChildNoQuotaCheck(dstInodesInPath, dstInodes.length - 1, removedDst, UNKNOWN_DISK_SPACE); } } @@ -814,7 +813,8 @@ Block[] unprotectedSetReplication(String src, UnresolvedLinkException { assert hasWriteLock(); - INode[] inodes = rootDir.getExistingPathINodes(src, true); + final INodesInPath inodesInPath = rootDir.getExistingPathINodes(src, true); + final INode[] inodes = inodesInPath.getINodes(); INode inode = inodes[inodes.length - 1]; if (inode == null) { return null; @@ -828,7 +828,7 @@ Block[] unprotectedSetReplication(String src, // check disk quota long dsDelta = (replication - oldRepl) * (fileNode.diskspaceConsumed()/oldRepl); - updateCount(inodes, inodes.length-1, 0, dsDelta, true); + updateCount(inodesInPath, inodes.length-1, 0, dsDelta, true); fileNode.setReplication(replication); @@ -958,7 +958,8 @@ public void unprotectedConcat(String target, String [] srcs, long timestamp) } // do the move - INode [] trgINodes = getExistingPathINodes(target); + final INodesInPath trgINodesInPath = rootDir.getExistingPathINodes(target, true); + final INode[] trgINodes = trgINodesInPath.getINodes(); INodeFile trgInode = (INodeFile) trgINodes[trgINodes.length-1]; INodeDirectory trgParent = (INodeDirectory)trgINodes[trgINodes.length-2]; @@ -985,7 +986,7 @@ public void unprotectedConcat(String target, String [] srcs, long timestamp) trgInode.setModificationTimeForce(timestamp); trgParent.setModificationTime(timestamp); // update quota on the parent directory ('count' files removed, 0 space) - unprotectedUpdateCount(trgINodes, trgINodes.length-1, - count, 0); + unprotectedUpdateCount(trgINodesInPath, trgINodes.length-1, -count, 0); } /** @@ -1068,7 +1069,8 @@ int unprotectedDelete(String src, List collectedBlocks, assert hasWriteLock(); src = normalizePath(src); - INode[] inodes = rootDir.getExistingPathINodes(src, false); + final INodesInPath inodesInPath = rootDir.getExistingPathINodes(src, false); + final INode[] inodes = inodesInPath.getINodes(); INode targetNode = inodes[inodes.length-1]; if (targetNode == null) { // 
non-existent src @@ -1086,7 +1088,7 @@ int unprotectedDelete(String src, List collectedBlocks, } int pos = inodes.length - 1; // Remove the node from the namespace - targetNode = removeChild(inodes, pos); + targetNode = removeChild(inodesInPath, pos); if (targetNode == null) { return 0; } @@ -1227,28 +1229,6 @@ INode getINode(String src) throws UnresolvedLinkException { readUnlock(); } } - - /** - * Retrieve the existing INodes along the given path. - * - * @param path the path to explore - * @return INodes array containing the existing INodes in the order they - * appear when following the path from the root INode to the - * deepest INodes. The array size will be the number of expected - * components in the path, and non existing components will be - * filled with null - * - * @see INodeDirectory#getExistingPathINodes(byte[][], INode[]) - */ - INode[] getExistingPathINodes(String path) - throws UnresolvedLinkException { - readLock(); - try { - return rootDir.getExistingPathINodes(path, true); - } finally { - readUnlock(); - } - } /** * Get the parent node of path. @@ -1314,13 +1294,14 @@ void updateSpaceConsumed(String path, long nsDelta, long dsDelta) UnresolvedLinkException { writeLock(); try { - INode[] inodes = rootDir.getExistingPathINodes(path, false); + final INodesInPath inodesInPath = rootDir.getExistingPathINodes(path, false); + final INode[] inodes = inodesInPath.getINodes(); int len = inodes.length; if (inodes[len - 1] == null) { throw new FileNotFoundException(path + " does not exist under rootDir."); } - updateCount(inodes, len-1, nsDelta, dsDelta, true); + updateCount(inodesInPath, len-1, nsDelta, dsDelta, true); } finally { writeUnlock(); } @@ -1335,7 +1316,7 @@ void updateSpaceConsumed(String path, long nsDelta, long dsDelta) * @param checkQuota if true then check if quota is exceeded * @throws QuotaExceededException if the new count violates any quota limit */ - private void updateCount(INode[] inodes, int numOfINodes, + private void updateCount(INodesInPath inodesInPath, int numOfINodes, long nsDelta, long dsDelta, boolean checkQuota) throws QuotaExceededException { assert hasWriteLock(); @@ -1343,29 +1324,25 @@ private void updateCount(INode[] inodes, int numOfINodes, //still initializing. do not check or update quotas. return; } - if (numOfINodes>inodes.length) { + final INode[] inodes = inodesInPath.getINodes(); + if (numOfINodes > inodes.length) { numOfINodes = inodes.length; } if (checkQuota) { verifyQuota(inodes, numOfINodes, nsDelta, dsDelta, null); } - for(int i = 0; i < numOfINodes; i++) { - if (inodes[i].isQuotaSet()) { // a directory with quota - INodeDirectoryWithQuota node =(INodeDirectoryWithQuota)inodes[i]; - node.updateNumItemsInTree(nsDelta, dsDelta); - } - } + unprotectedUpdateCount(inodesInPath, numOfINodes, nsDelta, dsDelta); } /** * update quota of each inode and check to see if quota is exceeded. 
* See {@link #updateCount(INode[], int, long, long, boolean)} */ - private void updateCountNoQuotaCheck(INode[] inodes, int numOfINodes, - long nsDelta, long dsDelta) { + private void updateCountNoQuotaCheck(INodesInPath inodesInPath, + int numOfINodes, long nsDelta, long dsDelta) { assert hasWriteLock(); try { - updateCount(inodes, numOfINodes, nsDelta, dsDelta, false); + updateCount(inodesInPath, numOfINodes, nsDelta, dsDelta, false); } catch (QuotaExceededException e) { NameNode.LOG.warn("FSDirectory.updateCountNoQuotaCheck - unexpected ", e); } @@ -1379,9 +1356,10 @@ private void updateCountNoQuotaCheck(INode[] inodes, int numOfINodes, * @param nsDelta * @param dsDelta */ - void unprotectedUpdateCount(INode[] inodes, int numOfINodes, - long nsDelta, long dsDelta) { - assert hasWriteLock(); + private void unprotectedUpdateCount(INodesInPath inodesInPath, + int numOfINodes, long nsDelta, long dsDelta) { + assert hasWriteLock(); + final INode[] inodes = inodesInPath.getINodes(); for(int i=0; i < numOfINodes; i++) { if (inodes[i].isQuotaSet()) { // a directory with quota INodeDirectoryWithQuota node =(INodeDirectoryWithQuota)inodes[i]; @@ -1458,7 +1436,7 @@ boolean mkdirs(String src, PermissionStatus permissions, StringBuilder pathbuilder = new StringBuilder(); int i = 1; for(; i < inodes.length && inodes[i] != null; i++) { - pathbuilder.append(Path.SEPARATOR + names[i]); + pathbuilder.append(Path.SEPARATOR).append(names[i]); if (!inodes[i].isDirectory()) { throw new FileAlreadyExistsException("Parent path is not a directory: " + pathbuilder+ " "+inodes[i].getLocalName()); @@ -1500,8 +1478,7 @@ boolean mkdirs(String src, PermissionStatus permissions, // create directories beginning from the first null index for(; i < inodes.length; i++) { pathbuilder.append(Path.SEPARATOR + names[i]); - String cur = pathbuilder.toString(); - unprotectedMkdir(inodes, i, components[i], + unprotectedMkdir(inodesInPath, i, components[i], (i < lastInodeIndex) ? parentPermissions : permissions, now); if (inodes[i] == null) { return false; @@ -1510,6 +1487,8 @@ boolean mkdirs(String src, PermissionStatus permissions, // to match count of FilesDeleted metric. if (getFSNamesystem() != null) NameNode.getNameNodeMetrics().incrFilesCreated(); + + final String cur = pathbuilder.toString(); fsImage.getEditLog().logMkDir(cur, inodes[i]); if(NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug( @@ -1530,30 +1509,30 @@ INode unprotectedMkdir(String src, PermissionStatus permissions, INodesInPath inodesInPath = rootDir.getExistingPathINodes(components, components.length, false); INode[] inodes = inodesInPath.getINodes(); - unprotectedMkdir(inodes, inodes.length-1, components[inodes.length-1], - permissions, timestamp); - return inodes[inodes.length-1]; + final int pos = inodes.length - 1; + unprotectedMkdir(inodesInPath, pos, components[pos], permissions, timestamp); + return inodes[pos]; } /** create a directory at index pos. * The parent path to the directory is at [0, pos-1]. * All ancestors exist. Newly created one stored at index pos. 
*/ - private void unprotectedMkdir(INode[] inodes, int pos, + private void unprotectedMkdir(INodesInPath inodesInPath, int pos, byte[] name, PermissionStatus permission, long timestamp) throws QuotaExceededException { assert hasWriteLock(); - inodes[pos] = addChild(inodes, pos, - new INodeDirectory(name, permission, timestamp), - -1); + final INodeDirectory dir = new INodeDirectory(name, permission, timestamp); + final INode inode = addChild(inodesInPath, pos, dir, -1, true); + inodesInPath.setINode(pos, inode); } /** Add a node child to the namespace. The full path name of the node is src. * childDiskspace should be -1, if unknown. - * QuotaExceededException is thrown if it violates quota limit */ - private T addNode(String src, T child, - long childDiskspace) - throws QuotaExceededException, UnresolvedLinkException { + * @throw QuotaExceededException is thrown if it violates quota limit + */ + private T addNode(String src, T child, long childDiskspace + ) throws QuotaExceededException, UnresolvedLinkException { byte[][] components = INode.getPathComponents(src); byte[] path = components[components.length-1]; child.setLocalName(path); @@ -1562,8 +1541,8 @@ private T addNode(String src, T child, try { INodesInPath inodesInPath = rootDir.getExistingPathINodes(components, components.length, false); - INode[] inodes = inodesInPath.getINodes(); - return addChild(inodes, inodes.length-1, child, childDiskspace); + return addChild(inodesInPath, inodesInPath.getINodes().length-1, child, + childDiskspace, true); } finally { writeUnlock(); } @@ -1688,19 +1667,22 @@ protected void verifyFsLimits(INode[] pathComponents, } /** Add a node child to the inodes at index pos. - * Its ancestors are stored at [0, pos-1]. - * QuotaExceededException is thrown if it violates quota limit */ - private T addChild(INode[] pathComponents, int pos, + * Its ancestors are stored at [0, pos-1]. + * @return the added node. + * @throw QuotaExceededException is thrown if it violates quota limit + */ + private T addChild(INodesInPath inodesInPath, int pos, T child, long childDiskspace, boolean checkQuota) throws QuotaExceededException { - // The filesystem limits are not really quotas, so this check may appear - // odd. It's because a rename operation deletes the src, tries to add - // to the dest, if that fails, re-adds the src from whence it came. - // The rename code disables the quota when it's restoring to the - // original location becase a quota violation would cause the the item - // to go "poof". The fs limits must be bypassed for the same reason. + final INode[] inodes = inodesInPath.getINodes(); + // The filesystem limits are not really quotas, so this check may appear + // odd. It's because a rename operation deletes the src, tries to add + // to the dest, if that fails, re-adds the src from whence it came. + // The rename code disables the quota when it's restoring to the + // original location becase a quota violation would cause the the item + // to go "poof". The fs limits must be bypassed for the same reason. 
if (checkQuota) { - verifyFsLimits(pathComponents, pos, child); + verifyFsLimits(inodes, pos, child); } INode.DirCounts counts = new INode.DirCounts(); @@ -1708,31 +1690,22 @@ private T addChild(INode[] pathComponents, int pos, if (childDiskspace < 0) { childDiskspace = counts.getDsCount(); } - updateCount(pathComponents, pos, counts.getNsCount(), childDiskspace, - checkQuota); - if (pathComponents[pos-1] == null) { + updateCount(inodesInPath, pos, counts.getNsCount(), childDiskspace, checkQuota); + if (inodes[pos-1] == null) { throw new NullPointerException("Panic: parent does not exist"); } - T addedNode = ((INodeDirectory)pathComponents[pos-1]).addChild( - child, true); + final T addedNode = ((INodeDirectory)inodes[pos-1]).addChild(child, true); if (addedNode == null) { - updateCount(pathComponents, pos, -counts.getNsCount(), - -childDiskspace, true); + updateCount(inodesInPath, pos, -counts.getNsCount(), -childDiskspace, true); } return addedNode; } - - private T addChild(INode[] pathComponents, int pos, - T child, long childDiskspace) - throws QuotaExceededException { - return addChild(pathComponents, pos, child, childDiskspace, true); - } - private T addChildNoQuotaCheck(INode[] pathComponents, + private T addChildNoQuotaCheck(INodesInPath inodesInPath, int pos, T child, long childDiskspace) { T inode = null; try { - inode = addChild(pathComponents, pos, child, childDiskspace, false); + inode = addChild(inodesInPath, pos, child, childDiskspace, false); } catch (QuotaExceededException e) { NameNode.LOG.warn("FSDirectory.addChildNoQuotaCheck - unexpected", e); } @@ -1744,13 +1717,13 @@ private T addChildNoQuotaCheck(INode[] pathComponents, * Count of each ancestor with quota is also updated. * Return the removed node; null if the removal fails. 
*/ - private INode removeChild(INode[] pathComponents, int pos) { - INode removedNode = - ((INodeDirectory)pathComponents[pos-1]).removeChild(pathComponents[pos]); + private INode removeChild(final INodesInPath inodesInPath, int pos) { + final INode[] inodes = inodesInPath.getINodes(); + INode removedNode = ((INodeDirectory)inodes[pos-1]).removeChild(inodes[pos]); if (removedNode != null) { INode.DirCounts counts = new INode.DirCounts(); removedNode.spaceConsumedInTree(counts); - updateCountNoQuotaCheck(pathComponents, pos, + updateCountNoQuotaCheck(inodesInPath, pos, -counts.getNsCount(), -counts.getDsCount()); } return removedNode; @@ -1885,7 +1858,8 @@ INodeDirectory unprotectedSetQuota(String src, long nsQuota, long dsQuota) String srcs = normalizePath(src); - INode[] inodes = rootDir.getExistingPathINodes(src, true); + final INodesInPath inodesInPath = rootDir.getExistingPathINodes(src, true); + final INode[] inodes = inodesInPath.getINodes(); INode targetNode = inodes[inodes.length-1]; if (targetNode == null) { throw new FileNotFoundException("Directory does not exist: " + srcs); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index d8e84586682..320c437bea0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -159,6 +159,7 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.common.Util; +import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath; import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease; import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory; import org.apache.hadoop.hdfs.server.namenode.ha.ActiveState; @@ -1666,7 +1667,7 @@ long getPreferredBlockSize(String filename) } } - /* + /** * Verify that parent directory of src exists. 
*/ private void verifyParentDir(String src) throws FileNotFoundException, @@ -1674,14 +1675,13 @@ private void verifyParentDir(String src) throws FileNotFoundException, assert hasReadOrWriteLock(); Path parent = new Path(src).getParent(); if (parent != null) { - INode[] pathINodes = dir.getExistingPathINodes(parent.toString()); - INode parentNode = pathINodes[pathINodes.length - 1]; + final INode parentNode = dir.getINode(parent.toString()); if (parentNode == null) { throw new FileNotFoundException("Parent directory doesn't exist: " - + parent.toString()); + + parent); } else if (!parentNode.isDirectory() && !parentNode.isSymlink()) { throw new ParentNotDirectoryException("Parent path is not a directory: " - + parent.toString()); + + parent); } } } @@ -2200,18 +2200,18 @@ LocatedBlock getAdditionalBlock(String src, if (isInSafeMode()) { throw new SafeModeException("Cannot add block to " + src, safeMode); } - INode[] pathINodes = dir.getExistingPathINodes(src); - int inodesLen = pathINodes.length; - checkLease(src, clientName, pathINodes[inodesLen-1]); - INodeFileUnderConstruction pendingFile = (INodeFileUnderConstruction) - pathINodes[inodesLen - 1]; + + final INodesInPath inodesInPath = dir.rootDir.getExistingPathINodes(src, true); + final INode[] inodes = inodesInPath.getINodes(); + final INodeFileUnderConstruction pendingFile + = checkLease(src, clientName, inodes[inodes.length - 1]); if (!checkFileProgress(pendingFile, false)) { throw new NotReplicatedYetException("Not replicated yet:" + src); } // allocate new block record block locations in INode. - newBlock = allocateBlock(src, pathINodes, targets); + newBlock = allocateBlock(src, inodesInPath, targets); for (DatanodeDescriptor dn : targets) { dn.incBlocksScheduled(); @@ -2422,14 +2422,12 @@ private boolean completeFileInternal(String src, * Allocate a block at the given pending filename * * @param src path to the file - * @param inodes INode representing each of the components of src. - * inodes[inodes.length-1] is the INode for the file. - * + * @param inodesInPath representing each of the components of src. + * The last INode is the INode for the file. * @throws QuotaExceededException If addition of block exceeds space quota */ - private Block allocateBlock(String src, INode[] inodes, - DatanodeDescriptor targets[]) throws QuotaExceededException, - SafeModeException { + private Block allocateBlock(String src, INodesInPath inodesInPath, + DatanodeDescriptor targets[]) throws IOException { assert hasWriteLock(); Block b = new Block(DFSUtil.getRandom().nextLong(), 0, 0); while(isValidBlock(b)) { @@ -2438,7 +2436,7 @@ private Block allocateBlock(String src, INode[] inodes, // Increment the generation stamp for every new block. nextGenerationStamp(); b.setGenerationStamp(getGenerationStamp()); - b = dir.addBlock(src, inodes, b, targets); + b = dir.addBlock(src, inodesInPath, b, targets); NameNode.stateChangeLog.info("BLOCK* allocateBlock: " + src + ". 
" + blockPoolId + " " + b); return b; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java index 5fb1d8049fc..91ebc968a04 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java @@ -121,7 +121,7 @@ void checkPermission(String path, INodeDirectory root, boolean doCheckOwner, } // check if (parentAccess != null) && file exists, then check sb // Resolve symlinks, the check is performed on the link target. - INode[] inodes = root.getExistingPathINodes(path, true); + final INode[] inodes = root.getExistingPathINodes(path, true).getINodes(); int ancestorIndex = inodes.length - 2; for(; ancestorIndex >= 0 && inodes[ancestorIndex] == null; ancestorIndex--); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java index fd913403497..ef3a9405114 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java @@ -244,14 +244,12 @@ INodesInPath getExistingPathINodes(byte[][] components, int numOfINodes, * components in the path, and non existing components will be * filled with null * - * @see #getExistingPathINodes(byte[][], INode[]) + * @see #getExistingPathINodes(byte[][], int, boolean) */ - INode[] getExistingPathINodes(String path, boolean resolveLink) + INodesInPath getExistingPathINodes(String path, boolean resolveLink) throws UnresolvedLinkException { byte[][] components = getPathComponents(path); - INodesInPath inodes = this.getExistingPathINodes(components, - components.length, resolveLink); - return inodes.inodes; + return getExistingPathINodes(components, components.length, resolveLink); } /** @@ -460,6 +458,10 @@ public INodesInPath(int number) { INode[] getINodes() { return inodes; } + + void setINode(int i, INode inode) { + inodes[i] = inode; + } } /* From e0ce1b247550c6c89c292fb328c91d4b091a1473 Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Mon, 5 Nov 2012 23:49:33 +0000 Subject: [PATCH 06/14] HDFS-4046. Rename ChecksumTypeProto enum NULL since it is illegal in C/C++. Contributed by Binglin Chang. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1406011 13f79535-47bb-0310-9956-ffa450edef68 --- .../org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java | 4 ++-- .../protocol/datatransfer/DataTransferProtoUtil.java | 4 ++-- .../org/apache/hadoop/hdfs/protocolPB/PBHelper.java | 7 +++---- .../hadoop-hdfs/src/main/proto/datatransfer.proto | 2 +- .../hadoop-hdfs/src/main/proto/hdfs.proto | 10 ++++++---- 5 files changed, 14 insertions(+), 13 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java index 5bd8a0806f3..fe7446f6740 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java @@ -157,11 +157,11 @@ public static DatanodeInfo[] fromProtos( } public static DataChecksum.Type fromProto(HdfsProtos.ChecksumTypeProto type) { - return DataChecksum.Type.valueOf(type.name()); + return DataChecksum.Type.valueOf(type.getNumber()); } public static HdfsProtos.ChecksumTypeProto toProto(DataChecksum.Type type) { - return HdfsProtos.ChecksumTypeProto.valueOf(type.name()); + return HdfsProtos.ChecksumTypeProto.valueOf(type.id); } public static InputStream vintPrefixed(final InputStream input) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java index dd9beb535bc..d2b04ed2bdf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java @@ -52,7 +52,7 @@ static OpWriteBlockProto.BlockConstructionStage toProto( } public static ChecksumProto toProto(DataChecksum checksum) { - ChecksumTypeProto type = ChecksumTypeProto.valueOf(checksum.getChecksumType().name()); + ChecksumTypeProto type = HdfsProtoUtil.toProto(checksum.getChecksumType()); if (type == null) { throw new IllegalArgumentException( "Can't convert checksum to protobuf: " + checksum); @@ -68,7 +68,7 @@ public static DataChecksum fromProto(ChecksumProto proto) { if (proto == null) return null; int bytesPerChecksum = proto.getBytesPerChecksum(); - DataChecksum.Type type = DataChecksum.Type.valueOf(proto.getType().name()); + DataChecksum.Type type = HdfsProtoUtil.fromProto(proto.getType()); return DataChecksum.newDataChecksum(type, bytesPerChecksum); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index 2d33558ede3..0603d15dd87 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus; +import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import 
org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos; @@ -67,7 +68,6 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto; @@ -129,7 +129,6 @@ import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.Text; -import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.security.token.Token; import com.google.protobuf.ByteString; @@ -961,7 +960,7 @@ public static FsServerDefaults convert(FsServerDefaultsProto fs) { fs.getFileBufferSize(), fs.getEncryptDataTransfer(), fs.getTrashInterval(), - DataChecksum.Type.valueOf(fs.getChecksumType().name())); + HdfsProtoUtil.fromProto(fs.getChecksumType())); } public static FsServerDefaultsProto convert(FsServerDefaults fs) { @@ -974,7 +973,7 @@ public static FsServerDefaultsProto convert(FsServerDefaults fs) { .setFileBufferSize(fs.getFileBufferSize()) .setEncryptDataTransfer(fs.getEncryptDataTransfer()) .setTrashInterval(fs.getTrashInterval()) - .setChecksumType(ChecksumTypeProto.valueOf(fs.getChecksumType().name())) + .setChecksumType(HdfsProtoUtil.toProto(fs.getChecksumType())) .build(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto index ae42c8358b3..d202f79a97a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto @@ -181,5 +181,5 @@ message OpBlockChecksumResponseProto { required uint32 bytesPerCrc = 1; required uint64 crcPerBlock = 2; required bytes md5 = 3; - optional ChecksumTypeProto crcType = 4 [default = CRC32]; + optional ChecksumTypeProto crcType = 4 [default = CHECKSUM_CRC32]; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto index 43f373b8c2e..16b149efb88 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto @@ -181,11 +181,13 @@ message HdfsFileStatusProto { /** * Checksum algorithms/types used in HDFS + * Make sure this enum's integer values match enum values' id properties defined + * in org.apache.hadoop.util.DataChecksum.Type */ enum ChecksumTypeProto { - NULL = 0; - CRC32 = 1; - CRC32C = 2; + CHECKSUM_NULL = 0; + CHECKSUM_CRC32 = 1; + CHECKSUM_CRC32C = 2; } /** @@ -199,7 +201,7 @@ message FsServerDefaultsProto { required uint32 fileBufferSize = 5; optional bool encryptDataTransfer = 6 [default = false]; optional uint64 trashInterval = 7 [default = 0]; - optional ChecksumTypeProto checksumType = 8 [default = CRC32]; + optional ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; } From 159e507995d7f99ca0c32d8b1b933772d47740a4 Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Mon, 5 Nov 2012 23:50:29 +0000 Subject: [PATCH 07/14] HDFS-4046. 
Adding the missed file in revision 1406011 git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1406012 13f79535-47bb-0310-9956-ffa450edef68 --- .../hdfs/protocol/TestHdfsProtoUtil.java | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsProtoUtil.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsProtoUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsProtoUtil.java new file mode 100644 index 00000000000..0a04e3c521f --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsProtoUtil.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.protocol; + +import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; +import org.apache.hadoop.util.DataChecksum; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + +public class TestHdfsProtoUtil { + @Test + public void testChecksumTypeProto() { + assertEquals(DataChecksum.Type.NULL, + HdfsProtoUtil.fromProto(HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL)); + assertEquals(DataChecksum.Type.CRC32, + HdfsProtoUtil.fromProto(HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32)); + assertEquals(DataChecksum.Type.CRC32C, + HdfsProtoUtil.fromProto(HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C)); + assertEquals(HdfsProtoUtil.toProto(DataChecksum.Type.NULL), + HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL); + assertEquals(HdfsProtoUtil.toProto(DataChecksum.Type.CRC32), + HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32); + assertEquals(HdfsProtoUtil.toProto(DataChecksum.Type.CRC32C), + HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C); + } +} From dca8dd7a20189c8faae46c44f3ec747ef4ca5103 Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Tue, 6 Nov 2012 00:22:23 +0000 Subject: [PATCH 08/14] Add HDFS-4046 to Release 2.0.3 section git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1406019 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index fdf1ba2cc55..63a9efe6ad2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -451,6 +451,9 @@ Release 2.0.3-alpha - Unreleased HDFS-4143. Change blocks to private in INodeFile and renames isLink() to isSymlink() in INode. (szetszwo) + HDFS-4046. Rename ChecksumTypeProto enum NULL since it is illegal in + C/C++. 
(Binglin Chang via suresh) + OPTIMIZATIONS BUG FIXES From 5605b54010b67785085192629d9a191e0c79bd90 Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Tue, 6 Nov 2012 15:37:14 +0000 Subject: [PATCH 09/14] HADOOP-9012. IPC Client sends wrong connection context (daryn via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1406184 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 2 + .../java/org/apache/hadoop/ipc/Client.java | 20 +- .../org/apache/hadoop/ipc/TestSaslRPC.java | 211 ++++++++++-------- 3 files changed, 129 insertions(+), 104 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 5cac902f0ea..e349cb4526a 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -395,6 +395,8 @@ Release 2.0.3-alpha - Unreleased HADOOP-8713. TestRPCCompatibility fails intermittently with JDK7 (Trevor Robinson via tgraves) + HADOOP-9012. IPC Client sends wrong connection context (daryn via bobby) + Release 2.0.2-alpha - 2012-09-07 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java index aea73b4c385..1af14f941c7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java @@ -223,7 +223,6 @@ public synchronized Writable getRpcResult() { private class Connection extends Thread { private InetSocketAddress server; // server ip:port private String serverPrincipal; // server's krb5 principal name - private IpcConnectionContextProto connectionContext; // connection context private final ConnectionId remoteId; // connection id private AuthMethod authMethod; // authentication method private Token token; @@ -304,9 +303,6 @@ public Connection(ConnectionId remoteId) throws IOException { authMethod = AuthMethod.SIMPLE; } - connectionContext = ProtoUtil.makeIpcConnectionContext( - RPC.getProtocolName(protocol), ticket, authMethod); - if (LOG.isDebugEnabled()) LOG.debug("Use " + authMethod + " authentication for protocol " + protocol.getSimpleName()); @@ -607,11 +603,6 @@ public Boolean run() throws IOException { } else { // fall back to simple auth because server told us so. authMethod = AuthMethod.SIMPLE; - // remake the connectionContext - connectionContext = ProtoUtil.makeIpcConnectionContext( - connectionContext.getProtocol(), - ProtoUtil.getUgi(connectionContext.getUserInfo()), - authMethod); } } @@ -622,7 +613,7 @@ public Boolean run() throws IOException { this.in = new DataInputStream(new BufferedInputStream(inStream)); } this.out = new DataOutputStream(new BufferedOutputStream(outStream)); - writeConnectionContext(); + writeConnectionContext(remoteId, authMethod); // update last activity time touch(); @@ -744,10 +735,15 @@ private void writeConnectionHeader(OutputStream outStream) /* Write the connection context header for each connection * Out is not synchronized because only the first thread does this. 
*/ - private void writeConnectionContext() throws IOException { + private void writeConnectionContext(ConnectionId remoteId, + AuthMethod authMethod) + throws IOException { // Write out the ConnectionHeader DataOutputBuffer buf = new DataOutputBuffer(); - connectionContext.writeTo(buf); + ProtoUtil.makeIpcConnectionContext( + RPC.getProtocolName(remoteId.getProtocol()), + remoteId.getTicket(), + authMethod).writeTo(buf); // Write out the payload length int bufLen = buf.getLength(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java index 6a4684e7884..7abd6e9dacb 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java @@ -29,6 +29,7 @@ import java.security.PrivilegedExceptionAction; import java.util.Collection; import java.util.Set; +import java.util.regex.Pattern; import javax.security.sasl.Sasl; @@ -42,7 +43,6 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.Client.ConnectionId; import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.KerberosInfo; import org.apache.hadoop.security.SaslInputStream; import org.apache.hadoop.security.SaslRpcClient; @@ -59,7 +59,7 @@ import org.apache.hadoop.security.token.TokenSelector; import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.log4j.Level; -import org.junit.BeforeClass; +import org.junit.Before; import org.junit.Test; /** Unit tests for using Sasl over RPC. */ @@ -76,8 +76,9 @@ public class TestSaslRPC { static final String SERVER_PRINCIPAL_2 = "p2/foo@BAR"; private static Configuration conf; - @BeforeClass - public static void setup() { + + @Before + public void setup() { conf = new Configuration(); SecurityUtil.setAuthenticationMethod(KERBEROS, conf); UserGroupInformation.setConfiguration(conf); @@ -187,6 +188,7 @@ public Token selectToken(Text service, @TokenInfo(TestTokenSelector.class) public interface TestSaslProtocol extends TestRPC.TestProtocol { public AuthenticationMethod getAuthMethod() throws IOException; + public String getAuthUser() throws IOException; } public static class TestSaslImpl extends TestRPC.TestImpl implements @@ -195,6 +197,10 @@ public static class TestSaslImpl extends TestRPC.TestImpl implements public AuthenticationMethod getAuthMethod() throws IOException { return UserGroupInformation.getCurrentUser().getAuthenticationMethod(); } + @Override + public String getAuthUser() throws IOException { + return UserGroupInformation.getCurrentUser().getUserName(); + } } public static class CustomSecurityInfo extends SecurityInfo { @@ -261,6 +267,7 @@ public void testDigestRpcWithoutAnnotation() throws Exception { @Test public void testSecureToInsecureRpc() throws Exception { + SecurityUtil.setAuthenticationMethod(AuthenticationMethod.SIMPLE, conf); Server server = new RPC.Builder(conf).setProtocol(TestSaslProtocol.class) .setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0) .setNumHandlers(5).setVerbose(true).build(); @@ -448,129 +455,135 @@ static void testKerberosRpc(String principal, String keytab) throws Exception { System.out.println("Test is successful."); } - // insecure -> insecure + private static Pattern BadToken = + Pattern.compile(".*DIGEST-MD5: digest response format violation.*"); + private static 
Pattern KrbFailed = + Pattern.compile(".*Failed on local exception:.* " + + "Failed to specify server's Kerberos principal name.*"); + private static Pattern Denied = + Pattern.compile(".*Authorization .* is enabled .*"); + + /* + * simple server + */ @Test - public void testInsecureClientInsecureServer() throws Exception { - assertEquals(AuthenticationMethod.SIMPLE, - getAuthMethod(false, false, false)); + public void testSimpleServer() throws Exception { + assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE, SIMPLE)); + // SASL methods are reverted to SIMPLE, but test setup fails + assertAuthEquals(KrbFailed, getAuthMethod(KERBEROS, SIMPLE)); } @Test - public void testInsecureClientInsecureServerWithToken() throws Exception { - assertEquals(AuthenticationMethod.TOKEN, - getAuthMethod(false, false, true)); + public void testSimpleServerWithTokens() throws Exception { + // Tokens are ignored because client is reverted to simple + assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE, SIMPLE, true)); + assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, true)); + } + + @Test + public void testSimpleServerWithInvalidTokens() throws Exception { + // Tokens are ignored because client is reverted to simple + assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE, SIMPLE, false)); + assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, false)); + } + + /* + * kerberos server + */ + @Test + public void testKerberosServer() throws Exception { + assertAuthEquals(Denied, getAuthMethod(SIMPLE, KERBEROS)); + assertAuthEquals(KrbFailed, getAuthMethod(KERBEROS, KERBEROS)); } - // insecure -> secure @Test - public void testInsecureClientSecureServer() throws Exception { - RemoteException e = null; + public void testKerberosServerWithTokens() throws Exception { + // can use tokens regardless of auth + assertAuthEquals(TOKEN, getAuthMethod(SIMPLE, KERBEROS, true)); + assertAuthEquals(TOKEN, getAuthMethod(KERBEROS, KERBEROS, true)); + } + + @Test + public void testKerberosServerWithInvalidTokens() throws Exception { + assertAuthEquals(BadToken, getAuthMethod(SIMPLE, KERBEROS, false)); + assertAuthEquals(BadToken, getAuthMethod(KERBEROS, KERBEROS, false)); + } + + + // test helpers + + private String getAuthMethod( + final AuthenticationMethod clientAuth, + final AuthenticationMethod serverAuth) throws Exception { try { - getAuthMethod(false, true, false); - } catch (RemoteException re) { - e = re; - } - assertNotNull(e); - assertEquals(AccessControlException.class.getName(), e.getClassName()); - } - - @Test - public void testInsecureClientSecureServerWithToken() throws Exception { - assertEquals(AuthenticationMethod.TOKEN, - getAuthMethod(false, true, true)); - } - - // secure -> secure - @Test - public void testSecureClientSecureServer() throws Exception { - /* Should be this when multiple secure auths are supported and we can - * dummy one out: - * assertEquals(AuthenticationMethod.SECURE_AUTH_METHOD, - * getAuthMethod(true, true, false)); - */ - try { - getAuthMethod(true, true, false); - } catch (IOException ioe) { - // can't actually test kerberos w/o kerberos... 
- String expectedError = "Failed to specify server's Kerberos principal"; - String actualError = ioe.getMessage(); - assertTrue("["+actualError+"] doesn't start with ["+expectedError+"]", - actualError.contains(expectedError)); + return internalGetAuthMethod(clientAuth, serverAuth, false, false); + } catch (Exception e) { + return e.toString(); } } - @Test - public void testSecureClientSecureServerWithToken() throws Exception { - assertEquals(AuthenticationMethod.TOKEN, - getAuthMethod(true, true, true)); - } - - // secure -> insecure - @Test - public void testSecureClientInsecureServerWithToken() throws Exception { - assertEquals(AuthenticationMethod.TOKEN, - getAuthMethod(true, false, true)); - } - - @Test - public void testSecureClientInsecureServer() throws Exception { - /* Should be this when multiple secure auths are supported and we can - * dummy one out: - * assertEquals(AuthenticationMethod.SIMPLE - * getAuthMethod(true, false, false)); - */ + private String getAuthMethod( + final AuthenticationMethod clientAuth, + final AuthenticationMethod serverAuth, + final boolean useValidToken) throws Exception { try { - getAuthMethod(true, false, false); - } catch (IOException ioe) { - // can't actually test kerberos w/o kerberos... - String expectedError = "Failed to specify server's Kerberos principal"; - String actualError = ioe.getMessage(); - assertTrue("["+actualError+"] doesn't start with ["+expectedError+"]", - actualError.contains(expectedError)); + return internalGetAuthMethod(clientAuth, serverAuth, true, useValidToken); + } catch (Exception e) { + return e.toString(); } } - - - private AuthenticationMethod getAuthMethod(final boolean isSecureClient, - final boolean isSecureServer, - final boolean useToken - - ) throws Exception { + + private String internalGetAuthMethod( + final AuthenticationMethod clientAuth, + final AuthenticationMethod serverAuth, + final boolean useToken, + final boolean useValidToken) throws Exception { + Configuration serverConf = new Configuration(conf); - SecurityUtil.setAuthenticationMethod( - isSecureServer ? KERBEROS : SIMPLE, serverConf); + SecurityUtil.setAuthenticationMethod(serverAuth, serverConf); UserGroupInformation.setConfiguration(serverConf); TestTokenSecretManager sm = new TestTokenSecretManager(); Server server = new RPC.Builder(serverConf).setProtocol(TestSaslProtocol.class) .setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0) - .setNumHandlers(5).setVerbose(true).setSecretManager(sm).build(); + .setNumHandlers(5).setVerbose(true) + .setSecretManager((serverAuth != SIMPLE) ? sm : null) + .build(); server.start(); - final UserGroupInformation current = UserGroupInformation.getCurrentUser(); + final UserGroupInformation clientUgi = + UserGroupInformation.createRemoteUser( + UserGroupInformation.getCurrentUser().getUserName()+"-CLIENT"); final InetSocketAddress addr = NetUtils.getConnectAddress(server); if (useToken) { TestTokenIdentifier tokenId = new TestTokenIdentifier( - new Text(current.getUserName())); - Token token = - new Token(tokenId, sm); + new Text(clientUgi.getUserName())); + Token token = useValidToken + ? new Token(tokenId, sm) + : new Token( + tokenId.getBytes(), "bad-password!".getBytes(), + tokenId.getKind(), null); + SecurityUtil.setTokenService(token, addr); - current.addToken(token); + clientUgi.addToken(token); } final Configuration clientConf = new Configuration(conf); - SecurityUtil.setAuthenticationMethod( - isSecureClient ? 
KERBEROS : SIMPLE, clientConf); + SecurityUtil.setAuthenticationMethod(clientAuth, clientConf); UserGroupInformation.setConfiguration(clientConf); + try { - return current.doAs(new PrivilegedExceptionAction() { + return clientUgi.doAs(new PrivilegedExceptionAction() { @Override - public AuthenticationMethod run() throws IOException { + public String run() throws IOException { TestSaslProtocol proxy = null; try { proxy = (TestSaslProtocol) RPC.getProxy(TestSaslProtocol.class, TestSaslProtocol.versionID, addr, clientConf); - return proxy.getAuthMethod(); + + // make sure the other side thinks we are who we said we are!!! + assertEquals(clientUgi.getUserName(), proxy.getAuthUser()); + return proxy.getAuthMethod().toString(); } finally { if (proxy != null) { RPC.stopProxy(proxy); @@ -582,7 +595,22 @@ public AuthenticationMethod run() throws IOException { server.stop(); } } + + private static void assertAuthEquals(AuthenticationMethod expect, + String actual) { + assertEquals(expect.toString(), actual); + } + private static void assertAuthEquals(Pattern expect, + String actual) { + // this allows us to see the regexp and the value it didn't match + if (!expect.matcher(actual).matches()) { + assertEquals(expect, actual); // it failed + } else { + assertTrue(true); // it matched + } + } + public static void main(String[] args) throws Exception { System.out.println("Testing Kerberos authentication over RPC"); if (args.length != 2) { @@ -595,5 +623,4 @@ public static void main(String[] args) throws Exception { String keytab = args[1]; testKerberosRpc(principal, keytab); } - } From 3a698e6aea4c1aac520569376fc0ef87289ff7a2 Mon Sep 17 00:00:00 2001 From: Daryn Sharp Date: Tue, 6 Nov 2012 15:57:58 +0000 Subject: [PATCH 10/14] HDFS-1331. dfs -test should work like /bin/test (Andy Isaacson via daryn) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1406198 13f79535-47bb-0310-9956-ffa450edef68 --- .../java/org/apache/hadoop/fs/shell/Test.java | 19 +++- .../src/test/resources/testConf.xml | 4 +- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../org/apache/hadoop/hdfs/TestDFSShell.java | 101 +++++++++++++++++- 4 files changed, 119 insertions(+), 7 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Test.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Test.java index 9780698b3a8..011cdac1f86 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Test.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Test.java @@ -37,16 +37,21 @@ public static void registerCommands(CommandFactory factory) { } public static final String NAME = "test"; - public static final String USAGE = "-[ezd] "; + public static final String USAGE = "-[defsz] "; public static final String DESCRIPTION = - "If file exists, has zero length, is a directory\n" + - "then return 0, else return 1."; + "Answer various questions about , with result via exit status.\n" + + " -d return 0 if is a directory.\n" + + " -e return 0 if exists.\n" + + " -f return 0 if is a file.\n" + + " -s return 0 if file is greater than zero bytes in size.\n" + + " -z return 0 if file is zero bytes in size.\n" + + "else, return 1."; private char flag; @Override protected void processOptions(LinkedList args) { - CommandFormat cf = new CommandFormat(1, 1, "e", "d", "z"); + CommandFormat cf = new CommandFormat(1, 1, "e", "d", "f", "s", "z"); cf.parse(args); String[] opts = cf.getOpts().toArray(new 
String[0]); @@ -71,6 +76,12 @@ protected void processPath(PathData item) throws IOException { case 'd': test = item.stat.isDirectory(); break; + case 'f': + test = item.stat.isFile(); + break; + case 's': + test = (item.stat.getLen() > 0); + break; case 'z': test = (item.stat.getLen() == 0); break; diff --git a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml index ade6e1a04bb..65a522b1b73 100644 --- a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml +++ b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml @@ -591,11 +591,11 @@ RegexpComparator - ^-test -\[ezd\] <path>:\s+If file exists, has zero length, is a directory( )* + ^-test -\[defsz\] <path>:\sAnswer various questions about <path>, with result via exit status. RegexpComparator - ^( |\t)*then return 0, else return 1.( )* + ^( |\t)*else, return 1.( )* diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 63a9efe6ad2..3ff09794d1a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -551,6 +551,8 @@ Release 2.0.3-alpha - Unreleased HDFS-4132. When libwebhdfs is not enabled, nativeMiniDfsClient frees uninitialized memory (Colin Patrick McCabe via todd) + HDFS-1331. dfs -test should work like /bin/test (Andy Isaacson via daryn) + Release 2.0.2-alpha - 2012-09-07 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java index 3e9026abf55..f24444fb693 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java @@ -1243,7 +1243,106 @@ public void testDFSShell() throws IOException { } assertEquals(0, val); } - + + // Verify -test -f negative case (missing file) + { + String[] args = new String[3]; + args[0] = "-test"; + args[1] = "-f"; + args[2] = "/test/mkdirs/noFileHere"; + int val = -1; + try { + val = shell.run(args); + } catch (Exception e) { + System.err.println("Exception raised from DFSShell.run " + + e.getLocalizedMessage()); + } + assertEquals(1, val); + } + + // Verify -test -f negative case (directory rather than file) + { + String[] args = new String[3]; + args[0] = "-test"; + args[1] = "-f"; + args[2] = "/test/mkdirs"; + int val = -1; + try { + val = shell.run(args); + } catch (Exception e) { + System.err.println("Exception raised from DFSShell.run " + + e.getLocalizedMessage()); + } + assertEquals(1, val); + } + + // Verify -test -f positive case + { + writeFile(fileSys, myFile); + assertTrue(fileSys.exists(myFile)); + + String[] args = new String[3]; + args[0] = "-test"; + args[1] = "-f"; + args[2] = myFile.toString(); + int val = -1; + try { + val = shell.run(args); + } catch (Exception e) { + System.err.println("Exception raised from DFSShell.run " + + e.getLocalizedMessage()); + } + assertEquals(0, val); + } + + // Verify -test -s negative case (missing file) + { + String[] args = new String[3]; + args[0] = "-test"; + args[1] = "-s"; + args[2] = "/test/mkdirs/noFileHere"; + int val = -1; + try { + val = shell.run(args); + } catch (Exception e) { + System.err.println("Exception raised from DFSShell.run " + + e.getLocalizedMessage()); + } + assertEquals(1, val); + } + + // Verify -test -s negative case (zero 
length file) + { + String[] args = new String[3]; + args[0] = "-test"; + args[1] = "-s"; + args[2] = "/test/mkdirs/isFileHere"; + int val = -1; + try { + val = shell.run(args); + } catch (Exception e) { + System.err.println("Exception raised from DFSShell.run " + + e.getLocalizedMessage()); + } + assertEquals(1, val); + } + + // Verify -test -s positive case (nonzero length file) + { + String[] args = new String[3]; + args[0] = "-test"; + args[1] = "-s"; + args[2] = myFile.toString(); + int val = -1; + try { + val = shell.run(args); + } catch (Exception e) { + System.err.println("Exception raised from DFSShell.run " + + e.getLocalizedMessage()); + } + assertEquals(0, val); + } + } finally { try { fileSys.close(); From b13a5cdcb7e4094b650b246a00848a0225d36cce Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Tue, 6 Nov 2012 16:12:43 +0000 Subject: [PATCH 11/14] HADOOP-9004. Allow security unit tests to use external KDC. Contributed by Stephen Chu. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1406202 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 + .../security/SecurityUtilTestHelper.java | 15 ++ .../security/TestUGIWithExternalKdc.java | 74 ++++++++++ .../datanode/SecureDataNodeStarter.java | 37 +++-- .../apache/hadoop/hdfs/MiniDFSCluster.java | 44 +++++- .../datanode/TestStartSecureDataNode.java | 117 ++++++++++++++++ .../TestSecureNameNodeWithExternalKdc.java | 129 ++++++++++++++++++ 7 files changed, 398 insertions(+), 21 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithExternalKdc.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStartSecureDataNode.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNodeWithExternalKdc.java diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index e349cb4526a..669c9e64d82 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -129,6 +129,9 @@ Trunk (Unreleased) HADOOP-8776. Provide an option in test-patch that can enable/disable compiling native code. (Chris Nauroth via suresh) + HADOOP-9004. Allow security unit tests to use external KDC. (Stephen Chu + via suresh) + BUG FIXES HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName. diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/SecurityUtilTestHelper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/SecurityUtilTestHelper.java index 7c5f5e1e146..8a9ad05c349 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/SecurityUtilTestHelper.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/SecurityUtilTestHelper.java @@ -27,4 +27,19 @@ public class SecurityUtilTestHelper { public static void setTokenServiceUseIp(boolean flag) { SecurityUtil.setTokenServiceUseIp(flag); } + + /** + * Return true if externalKdc=true and the location of the krb5.conf + * file has been specified, and false otherwise. 
+ */ + public static boolean isExternalKdcRunning() { + String externalKdc = System.getProperty("externalKdc"); + String krb5Conf = System.getProperty("java.security.krb5.conf"); + if(externalKdc == null || !externalKdc.equals("true") || + krb5Conf == null) { + return false; + } + return true; + } + } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithExternalKdc.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithExternalKdc.java new file mode 100644 index 00000000000..2f55b11fd1f --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithExternalKdc.java @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.security; + +import java.io.IOException; + +import junit.framework.Assert; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import static org.apache.hadoop.security.SecurityUtilTestHelper.isExternalKdcRunning; +import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; +import org.junit.Assume; +import org.junit.Before; +import org.junit.Test; + +/** + * Tests kerberos keytab login using a user-specified external KDC + * + * To run, users must specify the following system properties: + * externalKdc=true + * java.security.krb5.conf + * user.principal + * user.keytab + */ +public class TestUGIWithExternalKdc { + + @Before + public void testExternalKdcRunning() { + Assume.assumeTrue(isExternalKdcRunning()); + } + + @Test + public void testLogin() throws IOException { + String userPrincipal = System.getProperty("user.principal"); + String userKeyTab = System.getProperty("user.keytab"); + Assert.assertNotNull("User principal was not specified", userPrincipal); + Assert.assertNotNull("User keytab was not specified", userKeyTab); + + Configuration conf = new Configuration(); + conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, + "kerberos"); + UserGroupInformation.setConfiguration(conf); + + UserGroupInformation ugi = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(userPrincipal, userKeyTab); + + Assert.assertEquals(AuthenticationMethod.KERBEROS, + ugi.getAuthenticationMethod()); + + try { + UserGroupInformation + .loginUserFromKeytabAndReturnUGI("bogus@EXAMPLE.COM", userKeyTab); + Assert.fail("Login should have failed"); + } catch (Exception ex) { + ex.printStackTrace(); + } + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java index bcfcd9f76f6..c5e9c9ca851 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java @@ -38,6 +38,8 @@ import javax.net.ssl.SSLServerSocketFactory; +import com.google.common.annotations.VisibleForTesting; + /** * Utility class to start a datanode in a secure cluster, first obtaining * privileged resources before main startup and handing them to the datanode. @@ -73,6 +75,25 @@ public void init(DaemonContext context) throws Exception { // Stash command-line arguments for regular datanode args = context.getArguments(); + sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf); + resources = getSecureResources(sslFactory, conf); + } + + @Override + public void start() throws Exception { + System.err.println("Starting regular datanode initialization"); + DataNode.secureMain(args, resources); + } + + @Override public void destroy() { + sslFactory.destroy(); + } + + @Override public void stop() throws Exception { /* Nothing to do */ } + + @VisibleForTesting + public static SecureResources getSecureResources(final SSLFactory sslFactory, + Configuration conf) throws Exception { // Obtain secure port for data streaming to datanode InetSocketAddress streamingAddr = DataNode.getStreamingAddr(conf); int socketWriteTimeout = conf.getInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, @@ -85,13 +106,12 @@ public void init(DaemonContext context) throws Exception { // Check that we got the port we need if (ss.getLocalPort() != streamingAddr.getPort()) { throw new RuntimeException("Unable to bind on specified streaming port in secure " + - "context. Needed " + streamingAddr.getPort() + ", got " + ss.getLocalPort()); + "context. 
Needed " + streamingAddr.getPort() + ", got " + ss.getLocalPort()); } // Obtain secure listener for web server Connector listener; if (HttpConfig.isSecure()) { - sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf); try { sslFactory.init(); } catch (GeneralSecurityException ex) { @@ -126,18 +146,7 @@ protected SSLServerSocketFactory createFactory() throws Exception { } System.err.println("Opened streaming server at " + streamingAddr); System.err.println("Opened info server at " + infoSocAddr); - resources = new SecureResources(ss, listener); + return new SecureResources(ss, listener); } - @Override - public void start() throws Exception { - System.err.println("Starting regular datanode initialization"); - DataNode.secureMain(args, resources); - } - - @Override public void destroy() { - sslFactory.destroy(); - } - - @Override public void stop() throws Exception { /* Nothing to do */ } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index 0c238581ba4..851b52541f8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -81,6 +81,8 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.DataStorage; +import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter; +import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; @@ -95,6 +97,7 @@ import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.ProxyUsers; +import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ToolRunner; @@ -145,6 +148,7 @@ public static class Builder { private boolean setupHostsFile = false; private MiniDFSNNTopology nnTopology = null; private boolean checkExitOnShutdown = true; + private boolean checkDataNodeAddrConfig = false; private boolean checkDataNodeHostConfig = false; public Builder(Configuration conf) { @@ -263,6 +267,14 @@ public Builder checkExitOnShutdown(boolean val) { return this; } + /** + * Default: false + */ + public Builder checkDataNodeAddrConfig(boolean val) { + this.checkDataNodeAddrConfig = val; + return this; + } + /** * Default: false */ @@ -336,6 +348,7 @@ private MiniDFSCluster(Builder builder) throws IOException { builder.setupHostsFile, builder.nnTopology, builder.checkExitOnShutdown, + builder.checkDataNodeAddrConfig, builder.checkDataNodeHostConfig); } @@ -343,11 +356,14 @@ public class DataNodeProperties { DataNode datanode; Configuration conf; String[] dnArgs; + SecureResources secureResources; - DataNodeProperties(DataNode node, Configuration conf, String[] args) { + DataNodeProperties(DataNode node, Configuration conf, String[] args, + SecureResources secureResources) { this.datanode = node; this.conf = conf; this.dnArgs = args; + this.secureResources = secureResources; } } @@ -573,7 +589,7 @@ public MiniDFSCluster(int nameNodePort, manageNameDfsDirs, true, 
manageDataDfsDirs, manageDataDfsDirs, operation, racks, hosts, simulatedCapacities, null, true, false, - MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0), true, false); + MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0), true, false, false); } private void initMiniDFSCluster( @@ -584,6 +600,7 @@ private void initMiniDFSCluster( String[] hosts, long[] simulatedCapacities, String clusterId, boolean waitSafeMode, boolean setupHostsFile, MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown, + boolean checkDataNodeAddrConfig, boolean checkDataNodeHostConfig) throws IOException { ExitUtil.disableSystemExit(); @@ -647,7 +664,7 @@ private void initMiniDFSCluster( // Start the DataNodes startDataNodes(conf, numDataNodes, manageDataDfsDirs, operation, racks, - hosts, simulatedCapacities, setupHostsFile, false, checkDataNodeHostConfig); + hosts, simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, checkDataNodeHostConfig); waitClusterUp(); //make sure ProxyUsers uses the latest conf ProxyUsers.refreshSuperUserGroupsConfiguration(conf); @@ -1161,7 +1178,18 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes, if (hosts != null) { NetUtils.addStaticResolution(hosts[i - curDatanodesNum], "localhost"); } - DataNode dn = DataNode.instantiateDataNode(dnArgs, dnConf); + + SecureResources secureResources = null; + if (UserGroupInformation.isSecurityEnabled()) { + SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, dnConf); + try { + secureResources = SecureDataNodeStarter.getSecureResources(sslFactory, dnConf); + } catch (Exception ex) { + ex.printStackTrace(); + } + } + DataNode dn = DataNode.instantiateDataNode(dnArgs, dnConf, + secureResources); if(dn == null) throw new IOException("Cannot start DataNode in " + dnConf.get(DFS_DATANODE_DATA_DIR_KEY)); @@ -1176,7 +1204,7 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes, racks[i-curDatanodesNum]); } dn.runDatanodeDaemon(); - dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs)); + dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs, secureResources)); } curDatanodesNum += numDataNodes; this.numDataNodes += numDataNodes; @@ -1607,14 +1635,16 @@ public synchronized boolean restartDataNode(DataNodeProperties dnprop, boolean keepPort) throws IOException { Configuration conf = dnprop.conf; String[] args = dnprop.dnArgs; + SecureResources secureResources = dnprop.secureResources; Configuration newconf = new HdfsConfiguration(conf); // save cloned config if (keepPort) { InetSocketAddress addr = dnprop.datanode.getXferAddress(); conf.set(DFS_DATANODE_ADDRESS_KEY, addr.getAddress().getHostAddress() + ":" + addr.getPort()); } - dataNodes.add(new DataNodeProperties(DataNode.createDataNode(args, conf), - newconf, args)); + dataNodes.add(new DataNodeProperties( + DataNode.createDataNode(args, conf, secureResources), + newconf, args, secureResources)); numDataNodes++; return true; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStartSecureDataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStartSecureDataNode.java new file mode 100644 index 00000000000..d1b2d668ded --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStartSecureDataNode.java @@ -0,0 +1,117 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdfs.server.namenode; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.IOException; +import java.security.PrivilegedExceptionAction; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; +import static org.apache.hadoop.security.SecurityUtilTestHelper.isExternalKdcRunning; +import org.junit.Assume; +import org.junit.Before; +import org.junit.Test; + +/** + * This test starts a 1 NameNode 1 DataNode MiniDFSCluster with + * kerberos authentication enabled using user-specified KDC, + * principals, and keytabs. + * + * A secure DataNode has to be started by root, so this test needs to + * be run by root. + * + * To run, users must specify the following system properties: + * externalKdc=true + * java.security.krb5.conf + * dfs.namenode.kerberos.principal + * dfs.namenode.kerberos.internal.spnego.principal + * dfs.namenode.keytab.file + * dfs.datanode.kerberos.principal + * dfs.datanode.keytab.file + */ +public class TestStartSecureDataNode { + final static private int NUM_OF_DATANODES = 1; + + @Before + public void testExternalKdcRunning() { + // Tests are skipped if external KDC is not running. 
+ Assume.assumeTrue(isExternalKdcRunning()); + } + + @Test + public void testSecureNameNode() throws IOException, InterruptedException { + MiniDFSCluster cluster = null; + try { + String nnPrincipal = + System.getProperty("dfs.namenode.kerberos.principal"); + String nnSpnegoPrincipal = + System.getProperty("dfs.namenode.kerberos.internal.spnego.principal"); + String nnKeyTab = System.getProperty("dfs.namenode.keytab.file"); + assertNotNull("NameNode principal was not specified", nnPrincipal); + assertNotNull("NameNode SPNEGO principal was not specified", + nnSpnegoPrincipal); + assertNotNull("NameNode keytab was not specified", nnKeyTab); + + String dnPrincipal = System.getProperty("dfs.datanode.kerberos.principal"); + String dnKeyTab = System.getProperty("dfs.datanode.keytab.file"); + assertNotNull("DataNode principal was not specified", dnPrincipal); + assertNotNull("DataNode keytab was not specified", dnKeyTab); + + Configuration conf = new HdfsConfiguration(); + conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, + "kerberos"); + conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, nnPrincipal); + conf.set(DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY, + nnSpnegoPrincipal); + conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, nnKeyTab); + conf.set(DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY, dnPrincipal); + conf.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, dnKeyTab); + // Secure DataNode requires using ports lower than 1024. + conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:1004"); + conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:1006"); + conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, "700"); + + cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(NUM_OF_DATANODES) + .checkDataNodeAddrConfig(true) + .build(); + cluster.waitActive(); + assertTrue(cluster.isDataNodeUp()); + + } catch (Exception ex) { + ex.printStackTrace(); + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNodeWithExternalKdc.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNodeWithExternalKdc.java new file mode 100644 index 00000000000..e98e112c63b --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNodeWithExternalKdc.java @@ -0,0 +1,129 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package org.apache.hadoop.hdfs.server.namenode; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.IOException; +import java.security.PrivilegedExceptionAction; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; +import static org.apache.hadoop.security.SecurityUtilTestHelper.isExternalKdcRunning; +import org.junit.Assume; +import org.junit.Before; +import org.junit.Test; + +/** + * This test brings up a MiniDFSCluster with 1 NameNode and 0 + * DataNodes with kerberos authentication enabled using user-specified + * KDC, principals, and keytabs. + * + * To run, users must specify the following system properties: + * externalKdc=true + * java.security.krb5.conf + * dfs.namenode.kerberos.principal + * dfs.namenode.kerberos.internal.spnego.principal + * dfs.namenode.keytab.file + * user.principal (do not specify superuser!) + * user.keytab + */ +public class TestSecureNameNodeWithExternalKdc { + final static private int NUM_OF_DATANODES = 0; + + @Before + public void testExternalKdcRunning() { + // Tests are skipped if external KDC is not running. + Assume.assumeTrue(isExternalKdcRunning()); + } + + @Test + public void testSecureNameNode() throws IOException, InterruptedException { + MiniDFSCluster cluster = null; + try { + String nnPrincipal = + System.getProperty("dfs.namenode.kerberos.principal"); + String nnSpnegoPrincipal = + System.getProperty("dfs.namenode.kerberos.internal.spnego.principal"); + String nnKeyTab = System.getProperty("dfs.namenode.keytab.file"); + assertNotNull("NameNode principal was not specified", nnPrincipal); + assertNotNull("NameNode SPNEGO principal was not specified", + nnSpnegoPrincipal); + assertNotNull("NameNode keytab was not specified", nnKeyTab); + + Configuration conf = new HdfsConfiguration(); + conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, + "kerberos"); + conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, nnPrincipal); + conf.set(DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY, + nnSpnegoPrincipal); + conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, nnKeyTab); + + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES) + .build(); + final MiniDFSCluster clusterRef = cluster; + cluster.waitActive(); + FileSystem fsForCurrentUser = cluster.getFileSystem(); + fsForCurrentUser.mkdirs(new Path("/tmp")); + fsForCurrentUser.setPermission(new Path("/tmp"), new FsPermission( + (short) 511)); + + // The user specified should not be a superuser + String userPrincipal = System.getProperty("user.principal"); + String userKeyTab = System.getProperty("user.keytab"); + assertNotNull("User principal was not specified", userPrincipal); + assertNotNull("User keytab was not specified", userKeyTab); + + UserGroupInformation ugi = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(userPrincipal, userKeyTab); + FileSystem fs = ugi.doAs(new PrivilegedExceptionAction() { + @Override + public FileSystem run() throws Exception 
{ + return clusterRef.getFileSystem(); + } + }); + try { + Path p = new Path("/users"); + fs.mkdirs(p); + fail("User must not be allowed to write in /"); + } catch (IOException expected) { + } + + Path p = new Path("/tmp/alpha"); + fs.mkdirs(p); + assertNotNull(fs.listStatus(p)); + assertEquals(AuthenticationMethod.KERBEROS, + ugi.getAuthenticationMethod()); + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } +} From b68bd472dcc09ac2baf059cd4544bed75feed3da Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Tue, 6 Nov 2012 18:48:49 +0000 Subject: [PATCH 12/14] YARN-202. Log Aggregation generates a storm of fsync() for namenode (Kihwal Lee via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1406269 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 +++ .../apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java | 1 - 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 86b5336d503..a6102b216c4 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -199,6 +199,9 @@ Release 0.23.5 - UNRELEASED YARN-189. Fixed a deadlock between RM's ApplicationMasterService and the dispatcher. (Thomas Graves via vinodkv) + YARN-202. Log Aggregation generates a storm of fsync() for namenode + (Kihwal Lee via bobby) + Release 0.23.4 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java index 008324f013a..4b8dff91042 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java @@ -263,7 +263,6 @@ public void append(LogKey logKey, LogValue logValue) throws IOException { out = this.writer.prepareAppendValue(-1); logValue.write(out); out.close(); - this.fsDataOStream.hflush(); } public void closeWriter() { From 54b70db347c2ebf577919f2c42f171c6801e9ba1 Mon Sep 17 00:00:00 2001 From: Daryn Sharp Date: Tue, 6 Nov 2012 19:27:47 +0000 Subject: [PATCH 13/14] HDFS-4075. Reduce recommissioning overhead (Kihwal Lee via daryn) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1406278 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../hadoop/hdfs/server/blockmanagement/BlockManager.java | 4 ++++ .../hadoop/hdfs/server/blockmanagement/DatanodeManager.java | 6 +++++- 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 3ff09794d1a..e57fa487009 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -1940,6 +1940,8 @@ Release 0.23.5 - UNRELEASED OPTIMIZATIONS + HDFS-4075. Reduce recommissioning overhead (Kihwal Lee via daryn) + BUG FIXES HDFS-3829. 
TestHftpURLTimeouts fails intermittently with JDK7 (Trevor diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index b2f344d17a0..3a971fbf0d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -2696,6 +2696,7 @@ private void logBlockReplicationInfo(Block block, DatanodeDescriptor srcNode, void processOverReplicatedBlocksOnReCommission( final DatanodeDescriptor srcNode) { final Iterator it = srcNode.getBlockIterator(); + int numOverReplicated = 0; while(it.hasNext()) { final Block block = it.next(); BlockCollection bc = blocksMap.getBlockCollection(block); @@ -2705,8 +2706,11 @@ void processOverReplicatedBlocksOnReCommission( if (numCurrentReplica > expectedReplication) { // over-replicated block processOverReplicatedBlock(block, expectedReplication, null, null); + numOverReplicated++; } } + LOG.info("Invalidated " + numOverReplicated + " over-replicated blocks on " + + srcNode + " during recommissioning"); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index a8d31392156..23013d7d911 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -608,7 +608,11 @@ void stopDecommission(DatanodeDescriptor node) { if (node.isDecommissionInProgress() || node.isDecommissioned()) { LOG.info("Stop Decommissioning " + node); heartbeatManager.stopDecommission(node); - blockManager.processOverReplicatedBlocksOnReCommission(node); + // Over-replicated blocks will be detected and processed when + // the dead node comes back and send in its full block report. + if (node.isAlive) { + blockManager.processOverReplicatedBlocksOnReCommission(node); + } } } From 1734215a10fd93e38849ed0235b5e026b7f50f83 Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Tue, 6 Nov 2012 21:04:32 +0000 Subject: [PATCH 14/14] HDFS-4152. Add a new class BlocksMapUpdateInfo for the parameter in INode.collectSubtreeBlocksAndClear(..). Contributed by Jing Zhao git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1406326 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../hdfs/server/namenode/FSDirectory.java | 9 +-- .../hdfs/server/namenode/FSNamesystem.java | 35 ++++++++---- .../hadoop/hdfs/server/namenode/INode.java | 56 +++++++++++++++++-- .../hdfs/server/namenode/INodeDirectory.java | 5 +- .../hdfs/server/namenode/INodeFile.java | 7 +-- .../hdfs/server/namenode/INodeSymlink.java | 5 +- 7 files changed, 89 insertions(+), 31 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index e57fa487009..6dbd6665d7c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -154,6 +154,9 @@ Trunk (Unreleased) HDFS-4151. Change the methods in FSDirectory to pass INodesInPath instead of INode[] as a parameter. (szetszwo) + HDFS-4152. 
Add a new class BlocksMapUpdateInfo for the parameter in + INode.collectSubtreeBlocksAndClear(..). (Jing Zhao via szetszwo) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 03d9e4387f0..66103bcf32e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; +import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath; import org.apache.hadoop.hdfs.util.ByteArray; @@ -757,7 +758,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp, if (removedDst != null) { INode rmdst = removedDst; removedDst = null; - List<Block> collectedBlocks = new ArrayList<Block>(); + BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo(); filesDeleted = rmdst.collectSubtreeBlocksAndClear(collectedBlocks); getFSNamesystem().removePathAndBlocks(src, collectedBlocks); } @@ -996,7 +997,7 @@ public void unprotectedConcat(String target, String [] srcs, long timestamp) * @param collectedBlocks Blocks under the deleted directory * @return true on successful deletion; else false */ - boolean delete(String src, List<Block> collectedBlocks) + boolean delete(String src, BlocksMapUpdateInfo collectedBlocks) throws UnresolvedLinkException { if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + src); } @@ -1049,7 +1050,7 @@ boolean isNonEmptyDirectory(String path) throws UnresolvedLinkException { void unprotectedDelete(String src, long mtime) throws UnresolvedLinkException { assert hasWriteLock(); - List<Block> collectedBlocks = new ArrayList<Block>(); + BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo(); int filesRemoved = unprotectedDelete(src, collectedBlocks, mtime); if (filesRemoved > 0) { getFSNamesystem().removePathAndBlocks(src, collectedBlocks); } @@ -1064,7 +1065,7 @@ void unprotectedDelete(String src, long mtime) * @param mtime the time the inode is removed * @return the number of inodes deleted; 0 if no inodes are deleted.
*/ - int unprotectedDelete(String src, List collectedBlocks, + int unprotectedDelete(String src, BlocksMapUpdateInfo collectedBlocks, long mtime) throws UnresolvedLinkException { assert hasWriteLock(); src = normalizePath(src); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 320c437bea0..106718fe2f4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -17,20 +17,20 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY; @@ -159,6 +159,7 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.common.Util; +import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath; import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease; import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory; @@ -2667,7 +2668,7 @@ private boolean deleteInternal(String src, boolean recursive, boolean enforcePermission) throws AccessControlException, SafeModeException, UnresolvedLinkException, IOException { - ArrayList collectedBlocks = new ArrayList(); + BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo(); writeLock(); try { @@ -2698,21 +2699,26 @@ private boolean deleteInternal(String src, boolean recursive, return true; } - /** + /** * From the given list, incrementally remove the blocks from 
blockManager * Writelock is dropped and reacquired every BLOCK_DELETION_INCREMENT to * ensure that other waiters on the lock can get in. See HDFS-2938 + * + * @param blocks + * An instance of {@link BlocksMapUpdateInfo} which contains a list + * of blocks that need to be removed from blocksMap */ - private void removeBlocks(List blocks) { + private void removeBlocks(BlocksMapUpdateInfo blocks) { int start = 0; int end = 0; - while (start < blocks.size()) { + List toDeleteList = blocks.getToDeleteList(); + while (start < toDeleteList.size()) { end = BLOCK_DELETION_INCREMENT + start; - end = end > blocks.size() ? blocks.size() : end; + end = end > toDeleteList.size() ? toDeleteList.size() : end; writeLock(); try { for (int i = start; i < end; i++) { - blockManager.removeBlock(blocks.get(i)); + blockManager.removeBlock(toDeleteList.get(i)); } } finally { writeUnlock(); @@ -2721,7 +2727,12 @@ private void removeBlocks(List blocks) { } } - void removePathAndBlocks(String src, List blocks) { + /** + * Remove leases and blocks related to a given path + * @param src The given path + * @param blocks Containing the list of blocks to be deleted from blocksMap + */ + void removePathAndBlocks(String src, BlocksMapUpdateInfo blocks) { assert hasWriteLock(); leaseManager.removeLeaseWithPrefixPath(src); if (blocks == null) { @@ -2734,7 +2745,7 @@ void removePathAndBlocks(String src, List blocks) { boolean trackBlockCounts = isSafeModeTrackingBlocks(); int numRemovedComplete = 0, numRemovedSafe = 0; - for (Block b : blocks) { + for (Block b : blocks.getToDeleteList()) { if (trackBlockCounts) { BlockInfo bi = blockManager.getStoredBlock(b); if (bi.isComplete()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java index f5dcb62f0de..27a78cb4f10 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java @@ -188,11 +188,15 @@ public boolean isDirectory() { } /** - * Collect all the blocks in all children of this INode. - * Count and return the number of files in the sub tree. - * Also clears references since this INode is deleted. + * Collect all the blocks in all children of this INode. Count and return the + * number of files in the sub tree. Also clears references since this INode is + * deleted. + * + * @param info + * Containing all the blocks collected from the children of this + * INode. These blocks later should be removed from the blocksMap. */ - abstract int collectSubtreeBlocksAndClear(List v); + abstract int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info); /** Compute {@link ContentSummary}. */ public final ContentSummary computeContentSummary() { @@ -488,4 +492,48 @@ public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix) { out.print(s.substring(s.lastIndexOf(getClass().getSimpleName()))); out.println(")"); } + + /** + * Information used for updating the blocksMap when deleting files. + */ + public static class BlocksMapUpdateInfo { + /** + * The list of blocks that need to be removed from blocksMap + */ + private List toDeleteList; + + public BlocksMapUpdateInfo(List toDeleteList) { + this.toDeleteList = toDeleteList == null ? 
new ArrayList<Block>() + : toDeleteList; + } + + public BlocksMapUpdateInfo() { + toDeleteList = new ArrayList<Block>(); + } + + /** + * @return The list of blocks that need to be removed from blocksMap + */ + public List<Block> getToDeleteList() { + return toDeleteList; + } + + /** + * Add a to-be-deleted block into the + * {@link BlocksMapUpdateInfo#toDeleteList} + * @param toDelete the to-be-deleted block + */ + public void addDeleteBlock(Block toDelete) { + if (toDelete != null) { + toDeleteList.add(toDelete); + } + } + + /** + * Clear {@link BlocksMapUpdateInfo#toDeleteList} + */ + public void clear() { + toDeleteList.clear(); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java index ef3a9405114..078251c8191 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java @@ -27,7 +27,6 @@ import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.UnresolvedPathException; import com.google.common.annotations.VisibleForTesting; @@ -429,13 +428,13 @@ public List<INode> getChildren() { } @Override - int collectSubtreeBlocksAndClear(List<Block> v) { + int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info) { int total = 1; if (children == null) { return total; } for (INode child : children) { - total += child.collectSubtreeBlocksAndClear(v); + total += child.collectSubtreeBlocksAndClear(info); } parent = null; children = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index 6449493409f..dcc52a4d010 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -19,7 +19,6 @@ import java.io.FileNotFoundException; import java.io.IOException; -import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.permission.FsAction; @@ -152,11 +151,11 @@ public void setBlocks(BlockInfo[] blocks) { } @Override - int collectSubtreeBlocksAndClear(List<Block> v) { + int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info) { parent = null; - if(blocks != null && v != null) { + if(blocks != null && info != null) { for (BlockInfo blk : blocks) { - v.add(blk); + info.addDeleteBlock(blk); blk.setBlockCollection(null); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java index dbeae1bd53c..725f1443309 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java @@ -17,12 +17,9 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import java.util.List; - import org.apache.hadoop.classification.InterfaceAudience; import
org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.protocol.Block; /** * An INode representing a symbolic link. */ @@ -64,7 +61,7 @@ DirCounts spaceConsumedInTree(DirCounts counts) { } @Override - int collectSubtreeBlocksAndClear(List<Block> v) { + int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info) { return 1; }
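
The HDFS-4152 hunks above thread a BlocksMapUpdateInfo holder through INode.collectSubtreeBlocksAndClear(..) so that FSNamesystem.removeBlocks(..) can drain the collected blocks in fixed-size batches, dropping and reacquiring the namesystem write lock between batches so that other waiters on the lock can get in (see HDFS-2938). Below is a minimal, self-contained sketch of that batching pattern, not the Hadoop code itself: the Block and BlocksToDelete classes, the ReentrantLock standing in for the namesystem lock, and the batch size of 1000 are illustrative assumptions.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantLock;

public class IncrementalBlockRemovalSketch {

  /** Illustrative stand-in for org.apache.hadoop.hdfs.protocol.Block. */
  static class Block {
    final long blockId;
    Block(long blockId) { this.blockId = blockId; }
  }

  /** Illustrative stand-in for INode.BlocksMapUpdateInfo. */
  static class BlocksToDelete {
    private final List<Block> toDeleteList = new ArrayList<Block>();
    void addDeleteBlock(Block toDelete) {
      if (toDelete != null) {        // same null guard as addDeleteBlock(..) in the patch
        toDeleteList.add(toDelete);
      }
    }
    List<Block> getToDeleteList() { return toDeleteList; }
    void clear() { toDeleteList.clear(); }
  }

  // Illustrative batch size; the real constant is private to FSNamesystem.
  private static final int BLOCK_DELETION_INCREMENT = 1000;
  // Stand-in for the namesystem write lock.
  private final ReentrantLock writeLock = new ReentrantLock();
  private int removed = 0;

  /** Removes the collected blocks in batches, releasing the lock between batches. */
  void removeBlocks(BlocksToDelete blocks) {
    List<Block> toDeleteList = blocks.getToDeleteList();
    int start = 0;
    while (start < toDeleteList.size()) {
      int end = Math.min(start + BLOCK_DELETION_INCREMENT, toDeleteList.size());
      writeLock.lock();
      try {
        for (int i = start; i < end; i++) {
          // Placeholder for blockManager.removeBlock(toDeleteList.get(i)).
          removed++;
        }
      } finally {
        writeLock.unlock();          // other lock waiters can run between batches
      }
      start = end;
    }
  }

  public static void main(String[] args) {
    IncrementalBlockRemovalSketch sketch = new IncrementalBlockRemovalSketch();
    BlocksToDelete collected = new BlocksToDelete();
    for (long i = 0; i < 2500; i++) {
      collected.addDeleteBlock(new Block(i));
    }
    sketch.removeBlocks(collected);  // three batches: 1000, 1000, 500
    System.out.println("Removed " + sketch.removed + " blocks");
    collected.clear();
  }
}

Compared with passing a raw List<Block> down the INode tree, the holder keeps the null check and the clear() bookkeeping in one place, which is what the new addDeleteBlock(..) and clear() methods in the patch provide.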