Merge branch 'trunk' into HDFS-6581

commit 7aeda47bd9
Author: arp
Date:   2014-09-12 11:45:11 -07:00

201 changed files with 6975 additions and 484 deletions

View File

@@ -102,9 +102,6 @@ Trunk (Unreleased)
 HADOOP-9486. Promoted Windows and Shell related utils from YARN to Hadoop
 Common. (Chris Nauroth via vinodkv)
-HADOOP-9540. Expose the InMemoryS3 and S3N FilesystemStores implementations
-for Unit testing. (Hari via stevel)
 HADOOP-8844. Add a plaintext fs -text test-case.
 (Akira AJISAKA via harsh)
@@ -129,8 +126,6 @@ Trunk (Unreleased)
 HADOOP-11041. VersionInfo specifies subversion (Tsuyoshi OZAWA via aw)
-HADOOP-10373 create tools/hadoop-amazon for aws/EMR support (stevel)
 BUG FIXES
 HADOOP-9451. Fault single-layer config if node group topology is enabled.
@@ -506,6 +501,19 @@ Release 2.6.0 - UNRELEASED
 HADOOP-11070. Create MiniKMS for testing. (tucu)
+HADOOP-11057. checknative command to probe for winutils.exe on windows.
+(Xiaoyu Yao via cnauroth)
+HADOOP-10758. KMS: add ACLs on per key basis. (tucu)
+HADOOP-9540. Expose the InMemoryS3 and S3N FilesystemStores implementations
+for Unit testing. (Hari via stevel)
+HADOOP-10373 create tools/hadoop-amazon for aws/EMR support (stevel)
+HADOOP-11074. Move s3-related FS connector code to hadoop-aws (David S.
+Wang via Colin Patrick McCabe)
 OPTIMIZATIONS
 HADOOP-10838. Byte array native checksumming. (James Thomas via todd)
@@ -774,7 +782,18 @@ Release 2.6.0 - UNRELEASED
 HADOOP-10925. Compilation fails in native link0 function on Windows.
 (cnauroth)
+HADOOP-11077. NPE if hosts not specified in ProxyUsers. (gchanan via tucu)
+HADOOP-9989. Bug introduced in HADOOP-9374, which parses the -tokenCacheFile
+as binary file but set it to the configuration as JSON file. (zxu via tucu)
+HADOOP-11085. Excessive logging by org.apache.hadoop.util.Progress when
+value is NaN (Mit Desai via jlowe)
+HADOOP-11083. After refactoring of HTTP proxyuser to common, doAs param is
+case sensitive. (tucu)
-Release 2.5.1 - UNRELEASED
+Release 2.5.1 - 2014-09-05
 INCOMPATIBLE CHANGES

View File

@@ -123,7 +123,7 @@ public class DefaultImpersonationProvider implements ImpersonationProvider {
 MachineList MachineList = proxyHosts.get(
 getProxySuperuserIpConfKey(realUser.getShortUserName()));
-if(!MachineList.includes(remoteAddress)) {
+if(MachineList == null || !MachineList.includes(remoteAddress)) {
 throw new AuthorizationException("Unauthorized connection for super-user: "
 + realUser.getUserName() + " from IP " + remoteAddress);
 }
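For context, a minimal sketch of the configuration that used to trigger the NPE this null guard fixes (HADOOP-11077); the property names follow the standard hadoop.proxyuser.* convention, and the user names here are hypothetical:

import org.apache.hadoop.conf.Configuration;

public class ProxyHostsNpeSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // a users list is set, but the matching ".hosts" key is deliberately
    // absent, so proxyHosts has no MachineList entry for this superuser
    conf.set("hadoop.proxyuser.super.users", "proxied");
    // before the fix: MachineList was null and includes() threw an NPE;
    // after the fix: the request fails cleanly with AuthorizationException
  }
}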

View File

@@ -188,7 +188,8 @@ public class DelegationTokenAuthenticationFilter
 UTF8_CHARSET);
 if (list != null) {
 for (NameValuePair nv : list) {
-if (DelegationTokenAuthenticatedURL.DO_AS.equals(nv.getName())) {
+if (DelegationTokenAuthenticatedURL.DO_AS.
+    equalsIgnoreCase(nv.getName())) {
 return nv.getValue();
 }
 }

View File

@@ -332,7 +332,7 @@ public class GenericOptionsParser {
 }
 UserGroupInformation.getCurrentUser().addCredentials(
 Credentials.readTokenStorageFile(p, conf));
-conf.set("mapreduce.job.credentials.json", p.toString(),
+conf.set("mapreduce.job.credentials.binary", p.toString(),
 "from -tokenCacheFile command line option");
 }
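A sketch of the option end to end, assuming a token file tokens.bin previously written with Credentials.writeTokenStorageFile() exists in the working directory (the file name is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.GenericOptionsParser;

public class TokenCacheFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // -tokenCacheFile parses the file as binary token storage
    new GenericOptionsParser(conf,
        new String[] {"-tokenCacheFile", "tokens.bin"});
    // with this fix the path is recorded under the *.binary key, which
    // matches the format actually read; the old *.json key was misleading
    System.out.println(conf.get("mapreduce.job.credentials.binary"));
  }
}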

View File

@@ -37,7 +37,8 @@ public class NativeLibraryChecker {
 public static void main(String[] args) {
 String usage = "NativeLibraryChecker [-a|-h]\n"
 + " -a use -a to check all libraries are available\n"
-+ " by default just check hadoop library is available\n"
++ " by default just check hadoop library (and\n"
++ " winutils.exe on Windows OS) is available\n"
 + " exit with error code 1 if check failed\n"
 + " -h print this message\n";
 if (args.length > 1 ||
@@ -62,12 +63,16 @@ public class NativeLibraryChecker {
 boolean lz4Loaded = nativeHadoopLoaded;
 boolean bzip2Loaded = Bzip2Factory.isNativeBzip2Loaded(conf);
 boolean openSslLoaded = false;
+boolean winutilsExists = false;
 String openSslDetail = "";
 String hadoopLibraryName = "";
 String zlibLibraryName = "";
 String snappyLibraryName = "";
 String lz4LibraryName = "";
 String bzip2LibraryName = "";
+String winutilsPath = null;
 if (nativeHadoopLoaded) {
 hadoopLibraryName = NativeCodeLoader.getLibraryName();
 zlibLoaded = ZlibFactory.isNativeZlibLoaded(conf);
@@ -93,6 +98,15 @@ public class NativeLibraryChecker {
 bzip2LibraryName = Bzip2Factory.getLibraryName(conf);
 }
 }
+// winutils.exe is required on Windows
+winutilsPath = Shell.getWinUtilsPath();
+if (winutilsPath != null) {
+  winutilsExists = true;
+} else {
+  winutilsPath = "";
+}
 System.out.println("Native library checking:");
 System.out.printf("hadoop: %b %s\n", nativeHadoopLoaded, hadoopLibraryName);
 System.out.printf("zlib: %b %s\n", zlibLoaded, zlibLibraryName);
@@ -100,7 +114,11 @@ public class NativeLibraryChecker {
 System.out.printf("lz4: %b %s\n", lz4Loaded, lz4LibraryName);
 System.out.printf("bzip2: %b %s\n", bzip2Loaded, bzip2LibraryName);
 System.out.printf("openssl: %b %s\n", openSslLoaded, openSslDetail);
+if (Shell.WINDOWS) {
+  System.out.printf("winutils: %b %s\n", winutilsExists, winutilsPath);
+}
-if ((!nativeHadoopLoaded) ||
+if ((!nativeHadoopLoaded) || (Shell.WINDOWS && (!winutilsExists)) ||
 (checkAll && !(zlibLoaded && snappyLoaded && lz4Loaded && bzip2Loaded))) {
 // return 1 to indicated check failed
 ExitUtil.terminate(1);
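To exercise the new probe, the checker can be driven directly, which is equivalent to running "hadoop checknative -a" from the command line; the sample report below is illustrative only, since the values depend on platform and build:

public class ChecknativeSketch {
  public static void main(String[] args) {
    // runs all checks; calls ExitUtil.terminate(1) if a required piece is missing
    org.apache.hadoop.util.NativeLibraryChecker.main(new String[] {"-a"});
    // Illustrative output shape on Windows (paths hypothetical):
    //   Native library checking:
    //   hadoop:   true C:\hadoop\bin\hadoop.dll
    //   zlib:     true
    //   ...
    //   winutils: true C:\hadoop\bin\winutils.exe
  }
}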

View File

@@ -162,27 +162,27 @@ public class Progress {
 public synchronized void set(float progress) {
 if (Float.isNaN(progress)) {
 progress = 0;
-LOG.warn("Illegal progress value found, progress is Float.NaN. " +
+LOG.debug("Illegal progress value found, progress is Float.NaN. " +
 "Progress will be changed to 0");
 }
 else if (progress == Float.NEGATIVE_INFINITY) {
 progress = 0;
-LOG.warn("Illegal progress value found, progress is " +
+LOG.debug("Illegal progress value found, progress is " +
 "Float.NEGATIVE_INFINITY. Progress will be changed to 0");
 }
 else if (progress < 0) {
 progress = 0;
-LOG.warn("Illegal progress value found, progress is less than 0." +
+LOG.debug("Illegal progress value found, progress is less than 0." +
 " Progress will be changed to 0");
 }
 else if (progress > 1) {
 progress = 1;
-LOG.warn("Illegal progress value found, progress is larger than 1." +
+LOG.debug("Illegal progress value found, progress is larger than 1." +
 " Progress will be changed to 1");
 }
 else if (progress == Float.POSITIVE_INFINITY) {
 progress = 1;
-LOG.warn("Illegal progress value found, progress is " +
+LOG.debug("Illegal progress value found, progress is " +
 "Float.POSITIVE_INFINITY. Progress will be changed to 1");
 }
 this.progress = progress;
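For a concrete case, NaN typically reaches set() when a caller divides two zero counters; a minimal sketch of the arithmetic and the clamp:

public class NanProgressSketch {
  public static void main(String[] args) {
    float processed = 0f, total = 0f;
    float ratio = processed / total;         // 0f/0f is Float.NaN
    System.out.println(Float.isNaN(ratio));  // true
    // Progress.set(ratio) clamps this to 0; since callers may report in a
    // tight loop, logging the clamp at DEBUG rather than WARN keeps the
    // logs usable -- which is the point of HADOOP-11085.
  }
}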

View File

@@ -15,7 +15,5 @@
 org.apache.hadoop.fs.LocalFileSystem
 org.apache.hadoop.fs.viewfs.ViewFileSystem
-org.apache.hadoop.fs.s3.S3FileSystem
-org.apache.hadoop.fs.s3native.NativeS3FileSystem
 org.apache.hadoop.fs.ftp.FTPFileSystem
 org.apache.hadoop.fs.HarFileSystem

View File

@@ -478,6 +478,21 @@ public class TestProxyUsers {
 assertNotAuthorized(proxyUserUgi, "1.2.3.5");
 }
+@Test
+public void testNoHostsForUsers() throws Exception {
+  Configuration conf = new Configuration(false);
+  conf.set("y." + REAL_USER_NAME + ".users",
+      StringUtils.join(",", Arrays.asList(AUTHORIZED_PROXY_USER_NAME)));
+  ProxyUsers.refreshSuperUserGroupsConfiguration(conf, "y");
+  UserGroupInformation realUserUgi = UserGroupInformation
+      .createRemoteUser(REAL_USER_NAME);
+  UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
+      AUTHORIZED_PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
+  // IP doesn't matter
+  assertNotAuthorized(proxyUserUgi, "1.2.3.4");
+}
 private void assertNotAuthorized(UserGroupInformation proxyUgi, String host) {
 try {

View File

@@ -795,6 +795,23 @@ public class TestWebDelegationToken {
 jetty.start();
 final URL url = new URL(getJettyURL() + "/foo/bar");
+// proxyuser using raw HTTP, verifying doAs is case insensitive
+String strUrl = String.format("%s?user.name=%s&doas=%s",
+    url.toExternalForm(), FOO_USER, OK_USER);
+HttpURLConnection conn =
+    (HttpURLConnection) new URL(strUrl).openConnection();
+Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+List<String> ret = IOUtils.readLines(conn.getInputStream());
+Assert.assertEquals(1, ret.size());
+Assert.assertEquals(OK_USER, ret.get(0));
+strUrl = String.format("%s?user.name=%s&DOAS=%s", url.toExternalForm(),
+    FOO_USER, OK_USER);
+conn = (HttpURLConnection) new URL(strUrl).openConnection();
+Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+ret = IOUtils.readLines(conn.getInputStream());
+Assert.assertEquals(1, ret.size());
+Assert.assertEquals(OK_USER, ret.get(0));
 UserGroupInformation ugi = UserGroupInformation.createRemoteUser(FOO_USER);
 ugi.doAs(new PrivilegedExceptionAction<Void>() {
 @Override

View File

@@ -249,7 +249,7 @@ public class TestGenericOptionsParser extends TestCase {
 creds.writeTokenStorageFile(tmpPath, conf);
 new GenericOptionsParser(conf, args);
-String fileName = conf.get("mapreduce.job.credentials.json");
+String fileName = conf.get("mapreduce.job.credentials.binary");
 assertNotNull("files is null", fileName);
 assertEquals("files option does not match", tmpPath.toString(), fileName);

View File

@@ -17,6 +17,9 @@
 */
 package org.apache.hadoop.util;
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
 import junit.framework.TestCase;
 import org.apache.hadoop.util.ExitUtil.ExitException;
@@ -51,4 +54,30 @@ public class TestNativeLibraryChecker extends TestCase {
 }
 }
+@Test
+public void testNativeLibraryCheckerOutput() {
+  expectOutput(new String[] {"-a"});
+  // no argument
+  expectOutput(new String[0]);
+}
+private void expectOutput(String[] args) {
+  ExitUtil.disableSystemExit();
+  ByteArrayOutputStream outContent = new ByteArrayOutputStream();
+  PrintStream originalPs = System.out;
+  System.setOut(new PrintStream(outContent));
+  try {
+    NativeLibraryChecker.main(args);
+  } catch (ExitException e) {
+    ExitUtil.resetFirstExitException();
+  } finally {
+    if (Shell.WINDOWS) {
+      assertEquals(outContent.toString().indexOf("winutils: true") != -1, true);
+    }
+    if (NativeCodeLoader.isNativeCodeLoaded()) {
+      assertEquals(outContent.toString().indexOf("hadoop: true") != -1, true);
+    }
+    System.setOut(originalPs);
+  }
+}
 }

View File

@@ -94,4 +94,42 @@
 ACL for decrypt EncryptedKey CryptoExtension operations
 </description>
 </property>
+<property>
+  <name>default.key.acl.MANAGEMENT</name>
+  <value>*</value>
+  <description>
+    default ACL for MANAGEMENT operations for all key acls that are not
+    explicitly defined.
+  </description>
+</property>
+<property>
+  <name>default.key.acl.GENERATE_EEK</name>
+  <value>*</value>
+  <description>
+    default ACL for GENERATE_EEK operations for all key acls that are not
+    explicitly defined.
+  </description>
+</property>
+<property>
+  <name>default.key.acl.DECRYPT_EEK</name>
+  <value>*</value>
+  <description>
+    default ACL for DECRYPT_EEK operations for all key acls that are not
+    explicitly defined.
+  </description>
+</property>
+<property>
+  <name>default.key.acl.READ</name>
+  <value>*</value>
+  <description>
+    default ACL for READ operations for all key acls that are not
+    explicitly defined.
+  </description>
+</property>
 </configuration>

View File

@@ -20,6 +20,8 @@ package org.apache.hadoop.crypto.key.kms.server;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.kms.server.KMS.KMSOp;
+import org.apache.hadoop.crypto.key.kms.server.KeyAuthorizationKeyProvider.KeyACLs;
+import org.apache.hadoop.crypto.key.kms.server.KeyAuthorizationKeyProvider.KeyOpType;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
@@ -32,6 +34,7 @@ import java.util.Map;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
+import java.util.regex.Pattern;
 /**
  * Provides access to the <code>AccessControlList</code>s used by KMS,
@@ -39,7 +42,7 @@ import java.util.concurrent.TimeUnit;
  * are defined has been updated.
  */
 @InterfaceAudience.Private
-public class KMSACLs implements Runnable {
+public class KMSACLs implements Runnable, KeyACLs {
 private static final Logger LOG = LoggerFactory.getLogger(KMSACLs.class);
 private static final String UNAUTHORIZED_MSG_WITH_KEY =
@@ -67,6 +70,9 @@ public class KMSACLs implements Runnable {
 private volatile Map<Type, AccessControlList> acls;
 private volatile Map<Type, AccessControlList> blacklistedAcls;
+private volatile Map<String, HashMap<KeyOpType, AccessControlList>> keyAcls;
+private final Map<KeyOpType, AccessControlList> defaultKeyAcls =
+    new HashMap<KeyOpType, AccessControlList>();
 private ScheduledExecutorService executorService;
 private long lastReload;
@@ -74,14 +80,15 @@ public class KMSACLs implements Runnable {
 if (conf == null) {
 conf = loadACLs();
 }
-setACLs(conf);
+setKMSACLs(conf);
+setKeyACLs(conf);
 }
 public KMSACLs() {
 this(null);
 }
-private void setACLs(Configuration conf) {
+private void setKMSACLs(Configuration conf) {
 Map<Type, AccessControlList> tempAcls = new HashMap<Type, AccessControlList>();
 Map<Type, AccessControlList> tempBlacklist = new HashMap<Type, AccessControlList>();
 for (Type aclType : Type.values()) {
@@ -99,14 +106,69 @@ public class KMSACLs implements Runnable {
 blacklistedAcls = tempBlacklist;
 }
+private void setKeyACLs(Configuration conf) {
+  Map<String, HashMap<KeyOpType, AccessControlList>> tempKeyAcls =
+      new HashMap<String, HashMap<KeyOpType,AccessControlList>>();
+  Map<String, String> allKeyACLS =
+      conf.getValByRegex(Pattern.quote(KMSConfiguration.KEY_ACL_PREFIX));
+  for (Map.Entry<String, String> keyAcl : allKeyACLS.entrySet()) {
+    String k = keyAcl.getKey();
+    // this should be of type "key.acl.<KEY_NAME>.<OP_TYPE>"
+    int keyNameStarts = KMSConfiguration.KEY_ACL_PREFIX.length();
+    int keyNameEnds = k.lastIndexOf(".");
+    if (keyNameStarts >= keyNameEnds) {
+      LOG.warn("Invalid key name '{}'", k);
+    } else {
+      String aclStr = keyAcl.getValue();
+      String keyName = k.substring(keyNameStarts, keyNameEnds);
+      String keyOp = k.substring(keyNameEnds + 1);
+      KeyOpType aclType = null;
+      try {
+        aclType = KeyOpType.valueOf(keyOp);
+      } catch (IllegalArgumentException e) {
+        LOG.warn("Invalid key Operation '{}'", keyOp);
+      }
+      if (aclType != null) {
+        // On the assumption this will be single threaded.. else we need to
+        // ConcurrentHashMap
+        HashMap<KeyOpType,AccessControlList> aclMap =
+            tempKeyAcls.get(keyName);
+        if (aclMap == null) {
+          aclMap = new HashMap<KeyOpType, AccessControlList>();
+          tempKeyAcls.put(keyName, aclMap);
+        }
+        aclMap.put(aclType, new AccessControlList(aclStr));
+        LOG.info("KEY_NAME '{}' KEY_OP '{}' ACL '{}'",
+            keyName, aclType, aclStr);
+      }
+    }
+  }
+  keyAcls = tempKeyAcls;
+  for (KeyOpType keyOp : KeyOpType.values()) {
+    if (!defaultKeyAcls.containsKey(keyOp)) {
+      String confKey = KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + keyOp;
+      String aclStr = conf.get(confKey);
+      if (aclStr != null) {
+        if (aclStr.equals("*")) {
+          LOG.info("Default Key ACL for KEY_OP '{}' is set to '*'", keyOp);
+        }
+        defaultKeyAcls.put(keyOp, new AccessControlList(aclStr));
+      }
+    }
+  }
+}
 @Override
 public void run() {
 try {
 if (KMSConfiguration.isACLsFileNewer(lastReload)) {
-setACLs(loadACLs());
+setKMSACLs(loadACLs());
+setKeyACLs(loadACLs());
 }
 } catch (Exception ex) {
-LOG.warn("Could not reload ACLs file: " + ex.toString(), ex);
+LOG.warn(
+    String.format("Could not reload ACLs file: '%s'", ex.toString()), ex);
 }
 }
@@ -164,4 +226,29 @@ public class KMSACLs implements Runnable {
 }
 }
+@Override
+public boolean hasAccessToKey(String keyName, UserGroupInformation ugi,
+    KeyOpType opType) {
+  Map<KeyOpType, AccessControlList> keyAcl = keyAcls.get(keyName);
+  if (keyAcl == null) {
+    // Get KeyAcl map of DEFAULT KEY.
+    keyAcl = defaultKeyAcls;
+  }
+  // If no key acl is defined for this key, check to see if
+  // there are key defaults configured for this operation
+  AccessControlList acl = keyAcl.get(opType);
+  if (acl == null) {
+    // If no acl is specified for this operation, deny access
+    return false;
+  } else {
+    return acl.isUserAllowed(ugi);
+  }
+}
+@Override
+public boolean isACLPresent(String keyName, KeyOpType opType) {
+  return (keyAcls.containsKey(keyName) || defaultKeyAcls.containsKey(opType));
+}
 }
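A standalone sketch of the property-name split that setKeyACLs() performs on each "key.acl.<KEY_NAME>.<OP_TYPE>" entry; the property name used here is hypothetical:

public class KeyAclNameParseSketch {
  public static void main(String[] args) {
    final String prefix = "key.acl.";            // KMSConfiguration.KEY_ACL_PREFIX
    String k = "key.acl.testKey1.MANAGEMENT";    // hypothetical config key
    int keyNameStarts = prefix.length();
    int keyNameEnds = k.lastIndexOf(".");
    String keyName = k.substring(keyNameStarts, keyNameEnds);  // "testKey1"
    String keyOp = k.substring(keyNameEnds + 1);               // "MANAGEMENT"
    System.out.println(keyName + " -> " + keyOp);
    // because lastIndexOf(".") is used, a key name may itself contain dots:
    // "key.acl.a.b.MANAGEMENT" parses as keyName "a.b", op MANAGEMENT
  }
}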

View File

@@ -36,6 +36,9 @@ public class KMSConfiguration {
 public static final String CONFIG_PREFIX = "hadoop.kms.";
+public static final String KEY_ACL_PREFIX = "key.acl.";
+public static final String DEFAULT_KEY_ACL_PREFIX = "default.key.acl.";
 // Property to Enable/Disable Caching
 public static final String KEY_CACHE_ENABLE = CONFIG_PREFIX +
 "cache.enable";
@@ -57,6 +60,12 @@ public class KMSConfiguration {
 // 10 secs
 public static final long KMS_AUDIT_AGGREGATION_DELAY_DEFAULT = 10000;
+// Property to Enable/Disable per Key authorization
+public static final String KEY_AUTHORIZATION_ENABLE = CONFIG_PREFIX +
+    "key.authorization.enable";
+public static final boolean KEY_AUTHORIZATION_ENABLE_DEFAULT = true;
 static Configuration getConfiguration(boolean loadHadoopDefaults,
 String ... resources) {
 Configuration conf = new Configuration(loadHadoopDefaults);
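Per-key authorization is therefore on by default; a sketch of opting out programmatically, which has the same effect as setting hadoop.kms.key.authorization.enable to false in the KMS configuration file:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.kms.server.KMSConfiguration;

public class DisableKeyAuthzSketch {
  public static void main(String[] args) {
    Configuration kmsConf = new Configuration(false);
    // KEY_AUTHORIZATION_ENABLE resolves to "hadoop.kms.key.authorization.enable"
    kmsConf.setBoolean(KMSConfiguration.KEY_AUTHORIZATION_ENABLE, false);
    System.out.println(kmsConf.getBoolean(
        KMSConfiguration.KEY_AUTHORIZATION_ENABLE,
        KMSConfiguration.KEY_AUTHORIZATION_ENABLE_DEFAULT));  // false
  }
}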

View File

@@ -68,7 +68,7 @@ public class KMSWebApp implements ServletContextListener {
 private JmxReporter jmxReporter;
 private static Configuration kmsConf;
-private static KMSACLs acls;
+private static KMSACLs kmsAcls;
 private static Meter adminCallsMeter;
 private static Meter keyCallsMeter;
 private static Meter unauthorizedCallsMeter;
@@ -126,8 +126,8 @@ public class KMSWebApp implements ServletContextListener {
 LOG.info("  KMS Hadoop Version: " + VersionInfo.getVersion());
 LOG.info("-------------------------------------------------------------");
-acls = new KMSACLs();
-acls.startReloader();
+kmsAcls = new KMSACLs();
+kmsAcls.startReloader();
 metricRegistry = new MetricRegistry();
 jmxReporter = JmxReporter.forRegistry(metricRegistry).build();
@@ -188,6 +188,13 @@ public class KMSWebApp implements ServletContextListener {
 keyProviderCryptoExtension =
 new EagerKeyGeneratorKeyProviderCryptoExtension(kmsConf,
 keyProviderCryptoExtension);
+if (kmsConf.getBoolean(KMSConfiguration.KEY_AUTHORIZATION_ENABLE,
+    KMSConfiguration.KEY_AUTHORIZATION_ENABLE_DEFAULT)) {
+  keyProviderCryptoExtension =
+      new KeyAuthorizationKeyProvider(
+          keyProviderCryptoExtension, kmsAcls);
+}
 LOG.info("Initialized KeyProviderCryptoExtension "
 + keyProviderCryptoExtension);
 final int defaultBitlength = kmsConf
@@ -213,7 +220,7 @@ public class KMSWebApp implements ServletContextListener {
 @Override
 public void contextDestroyed(ServletContextEvent sce) {
 kmsAudit.shutdown();
-acls.stopReloader();
+kmsAcls.stopReloader();
 jmxReporter.stop();
 jmxReporter.close();
 metricRegistry = null;
@@ -225,7 +232,7 @@ public class KMSWebApp implements ServletContextListener {
 }
 public static KMSACLs getACLs() {
-return acls;
+return kmsAcls;
 }
 public static Meter getAdminCallsMeter() {

View File

@@ -0,0 +1,276 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;
import java.io.IOException;
import java.security.GeneralSecurityException;
import java.security.NoSuchAlgorithmException;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableMap;
/**
* A {@link KeyProvider} proxy that checks whether the current user derived via
* {@link UserGroupInformation}, is authorized to perform the following
* type of operations on a Key :
* <ol>
* <li>MANAGEMENT operations : createKey, rollNewVersion, deleteKey</li>
* <li>GENERATE_EEK operations : generateEncryptedKey, warmUpEncryptedKeys</li>
* <li>DECRYPT_EEK operation : decryptEncryptedKey</li>
* <li>READ operations : getKeyVersion, getKeyVersions, getMetadata,
* getKeysMetadata, getCurrentKey</li>
* </ol>
* The read operations (getCurrentKeyVersion / getMetadata) etc are not checked.
*/
public class KeyAuthorizationKeyProvider extends KeyProviderCryptoExtension {
public static final String KEY_ACL = "key.acl.";
private static final String KEY_ACL_NAME = KEY_ACL + "name";
public enum KeyOpType {
ALL, READ, MANAGEMENT, GENERATE_EEK, DECRYPT_EEK;
}
/**
* Interface that needs to be implemented by a client of the
* <code>KeyAuthorizationKeyProvider</code>.
*/
public static interface KeyACLs {
/**
* This is called by the KeyProvider to check if the given user is
* authorized to perform the specified operation on the given acl name.
* @param aclName name of the key ACL
* @param ugi User's UserGroupInformation
* @param opType Operation Type
* @return true if user has access to the aclName and opType else false
*/
public boolean hasAccessToKey(String aclName, UserGroupInformation ugi,
KeyOpType opType);
/**
*
* @param aclName ACL name
* @param opType Operation Type
* @return true if AclName exists else false
*/
public boolean isACLPresent(String aclName, KeyOpType opType);
}
private final KeyProviderCryptoExtension provider;
private final KeyACLs acls;
/**
* The constructor takes a {@link KeyProviderCryptoExtension} and an
* implementation of <code>KeyACLs</code>. All calls are delegated to the
* provider keyProvider after authorization check (if required)
* @param keyProvider
* @param acls
*/
public KeyAuthorizationKeyProvider(KeyProviderCryptoExtension keyProvider,
KeyACLs acls) {
super(keyProvider, null);
this.provider = keyProvider;
this.acls = acls;
}
// This method first checks if "key.acl.name" attribute is present as an
// attribute in the provider Options. If yes, use the aclName for any
// subsequent access checks, else use the keyName as the aclName and set it
// as the value of the "key.acl.name" in the key's metadata.
private void authorizeCreateKey(String keyName, Options options,
UserGroupInformation ugi) throws IOException{
Preconditions.checkNotNull(ugi, "UserGroupInformation cannot be null");
Map<String, String> attributes = options.getAttributes();
String aclName = attributes.get(KEY_ACL_NAME);
boolean success = false;
if (Strings.isNullOrEmpty(aclName)) {
if (acls.isACLPresent(keyName, KeyOpType.MANAGEMENT)) {
options.setAttributes(ImmutableMap.<String, String> builder()
.putAll(attributes).put(KEY_ACL_NAME, keyName).build());
success =
acls.hasAccessToKey(keyName, ugi, KeyOpType.MANAGEMENT)
|| acls.hasAccessToKey(keyName, ugi, KeyOpType.ALL);
} else {
success = false;
}
} else {
success = acls.isACLPresent(aclName, KeyOpType.MANAGEMENT) &&
(acls.hasAccessToKey(aclName, ugi, KeyOpType.MANAGEMENT)
|| acls.hasAccessToKey(aclName, ugi, KeyOpType.ALL));
}
if (!success)
throw new AuthorizationException(String.format("User [%s] is not"
+ " authorized to create key !!", ugi.getShortUserName()));
}
private void checkAccess(String aclName, UserGroupInformation ugi,
KeyOpType opType) throws AuthorizationException {
Preconditions.checkNotNull(aclName, "Key ACL name cannot be null");
Preconditions.checkNotNull(ugi, "UserGroupInformation cannot be null");
if (acls.isACLPresent(aclName, KeyOpType.MANAGEMENT) &&
(acls.hasAccessToKey(aclName, ugi, opType)
|| acls.hasAccessToKey(aclName, ugi, KeyOpType.ALL))) {
return;
} else {
throw new AuthorizationException(String.format("User [%s] is not"
+ " authorized to perform [%s] on key with ACL name [%s]!!",
ugi.getShortUserName(), opType, aclName));
}
}
@Override
public KeyVersion createKey(String name, Options options)
throws NoSuchAlgorithmException, IOException {
authorizeCreateKey(name, options, getUser());
return provider.createKey(name, options);
}
@Override
public KeyVersion createKey(String name, byte[] material, Options options)
throws IOException {
authorizeCreateKey(name, options, getUser());
return provider.createKey(name, material, options);
}
@Override
public KeyVersion rollNewVersion(String name)
throws NoSuchAlgorithmException, IOException {
doAccessCheck(name, KeyOpType.MANAGEMENT);
return provider.rollNewVersion(name);
}
@Override
public void deleteKey(String name) throws IOException {
doAccessCheck(name, KeyOpType.MANAGEMENT);
provider.deleteKey(name);
}
@Override
public KeyVersion rollNewVersion(String name, byte[] material)
throws IOException {
doAccessCheck(name, KeyOpType.MANAGEMENT);
return provider.rollNewVersion(name, material);
}
@Override
public void warmUpEncryptedKeys(String... names) throws IOException {
for (String name : names) {
doAccessCheck(name, KeyOpType.GENERATE_EEK);
}
provider.warmUpEncryptedKeys(names);
}
@Override
public EncryptedKeyVersion generateEncryptedKey(String encryptionKeyName)
throws IOException, GeneralSecurityException {
doAccessCheck(encryptionKeyName, KeyOpType.GENERATE_EEK);
return provider.generateEncryptedKey(encryptionKeyName);
}
@Override
public KeyVersion decryptEncryptedKey(EncryptedKeyVersion encryptedKeyVersion)
throws IOException, GeneralSecurityException {
doAccessCheck(
encryptedKeyVersion.getEncryptionKeyName(), KeyOpType.DECRYPT_EEK);
return provider.decryptEncryptedKey(encryptedKeyVersion);
}
@Override
public KeyVersion getKeyVersion(String versionName) throws IOException {
KeyVersion keyVersion = provider.getKeyVersion(versionName);
if (keyVersion != null) {
doAccessCheck(keyVersion.getName(), KeyOpType.READ);
}
return keyVersion;
}
@Override
public List<String> getKeys() throws IOException {
return provider.getKeys();
}
@Override
public List<KeyVersion> getKeyVersions(String name) throws IOException {
doAccessCheck(name, KeyOpType.READ);
return provider.getKeyVersions(name);
}
@Override
public Metadata getMetadata(String name) throws IOException {
doAccessCheck(name, KeyOpType.READ);
return provider.getMetadata(name);
}
@Override
public Metadata[] getKeysMetadata(String... names) throws IOException {
for (String name : names) {
doAccessCheck(name, KeyOpType.READ);
}
return provider.getKeysMetadata(names);
}
@Override
public KeyVersion getCurrentKey(String name) throws IOException {
doAccessCheck(name, KeyOpType.READ);
return provider.getCurrentKey(name);
}
@Override
public void flush() throws IOException {
provider.flush();
}
@Override
public boolean isTransient() {
return provider.isTransient();
}
private void doAccessCheck(String keyName, KeyOpType opType) throws
IOException {
Metadata metadata = provider.getMetadata(keyName);
if (metadata != null) {
String aclName = metadata.getAttributes().get(KEY_ACL_NAME);
checkAccess((aclName == null) ? keyName : aclName, getUser(), opType);
}
}
private UserGroupInformation getUser() throws IOException {
return UserGroupInformation.getCurrentUser();
}
@Override
protected KeyProvider getKeyProvider() {
return this;
}
@Override
public String toString() {
return provider.toString();
}
}

View File

@@ -443,6 +443,112 @@ $ keytool -genkey -alias tomcat -keyalg RSA
 +---+
*** Key Access Control
  KMS supports access control for all non-read operations at the Key level.
  All Key Access operations are classified as:
    * MANAGEMENT - createKey, deleteKey, rolloverNewVersion
    * GENERATE_EEK - generateEncryptedKey, warmUpEncryptedKeys
    * DECRYPT_EEK - decryptEncryptedKey
    * READ - getKeyVersion, getKeyVersions, getMetadata, getKeysMetadata,
      getCurrentKey
    * ALL - all of the above
  These can be defined in the KMS <<<etc/hadoop/kms-acls.xml>>> as follows.
  For all keys for which a key access has not been explicitly configured, it
  is possible to configure a default key access control for a subset of the
  operation types.
  If no ACL is configured for a specific key AND no default ACL is configured
  for the requested operation, access will be DENIED.
  <<NOTE:>> The default ACL does not support the <<<ALL>>> operation qualifier.
+---+
<property>
<name>key.acl.testKey1.MANAGEMENT</name>
<value>*</value>
<description>
ACL for create-key, deleteKey and rolloverNewVersion operations.
</description>
</property>
<property>
<name>key.acl.testKey2.GENERATE_EEK</name>
<value>*</value>
<description>
ACL for generateEncryptedKey operations.
</description>
</property>
<property>
<name>key.acl.testKey3.DECRYPT_EEK</name>
<value>*</value>
<description>
ACL for decryptEncryptedKey operations.
</description>
</property>
<property>
<name>key.acl.testKey4.READ</name>
<value>*</value>
<description>
ACL for getKeyVersion, getKeyVersions, getMetadata, getKeysMetadata,
getCurrentKey operations
</description>
</property>
<property>
<name>key.acl.testKey5.ALL</name>
<value>*</value>
<description>
ACL for ALL operations.
</description>
</property>
<property>
<name>default.key.acl.MANAGEMENT</name>
<value>user1,user2</value>
<description>
default ACL for MANAGEMENT operations for all keys that are not
explicitly defined.
</description>
</property>
<property>
<name>default.key.acl.GENERATE_EEK</name>
<value>user1,user2</value>
<description>
default ACL for GENERATE_EEK operations for all keys that are not
explicitly defined.
</description>
</property>
<property>
<name>default.key.acl.DECRYPT_EEK</name>
<value>user1,user2</value>
<description>
default ACL for DECRYPT_EEK operations for all keys that are not
explicitly defined.
</description>
</property>
<property>
<name>default.key.acl.READ</name>
<value>user1,user2</value>
<description>
default ACL for READ operations for all keys that are not
explicitly defined.
</description>
</property>
+---+
 ** KMS Delegation Token Configuration
 KMS delegation token secret manager can be configured with the following

View File

@@ -18,8 +18,10 @@
 package org.apache.hadoop.crypto.key.kms.server;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.kms.server.KeyAuthorizationKeyProvider;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
+import org.apache.hadoop.crypto.key.KeyProvider.Options;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
 import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
@@ -338,6 +340,13 @@ public class TestKMS {
 UserGroupInformation.setConfiguration(conf);
 File confDir = getTestDir();
 conf = createBaseKMSConf(confDir);
+conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k1.ALL", "*");
+conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k2.MANAGEMENT", "*");
+conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k2.READ", "*");
+conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k3.ALL", "*");
+conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k4.ALL", "*");
+conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k5.ALL", "*");
+conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k6.ALL", "*");
 writeConf(confDir, conf);
 runServer(null, null, confDir, new KMSCallable() {
@@ -492,10 +501,20 @@ public class TestKMS {
 options = new KeyProvider.Options(conf);
 options.setCipher("AES/CTR/NoPadding");
 options.setBitLength(128);
-kp.createKey("k2", options);
+KeyVersion kVer2 = kp.createKey("k2", options);
 KeyProvider.Metadata meta = kp.getMetadata("k2");
 Assert.assertNull(meta.getDescription());
-Assert.assertTrue(meta.getAttributes().isEmpty());
+Assert.assertEquals("k2", meta.getAttributes().get("key.acl.name"));
+// test key ACL.. k2 is granted only MANAGEMENT Op access
+try {
+  kpExt =
+      KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
+  kpExt.generateEncryptedKey(kVer2.getName());
+  Assert.fail("User should not be allowed to encrypt !!");
+} catch (Exception ex) {
+  //
+}
 // createKey() description, no tags
 options = new KeyProvider.Options(conf);
@@ -505,7 +524,7 @@ public class TestKMS {
 kp.createKey("k3", options);
 meta = kp.getMetadata("k3");
 Assert.assertEquals("d", meta.getDescription());
-Assert.assertTrue(meta.getAttributes().isEmpty());
+Assert.assertEquals("k3", meta.getAttributes().get("key.acl.name"));
 Map<String, String> attributes = new HashMap<String, String>();
 attributes.put("a", "A");
@@ -514,6 +533,7 @@ public class TestKMS {
 options = new KeyProvider.Options(conf);
 options.setCipher("AES/CTR/NoPadding");
 options.setBitLength(128);
+attributes.put("key.acl.name", "k4");
 options.setAttributes(attributes);
 kp.createKey("k4", options);
 meta = kp.getMetadata("k4");
@@ -525,6 +545,7 @@ public class TestKMS {
 options.setCipher("AES/CTR/NoPadding");
 options.setBitLength(128);
 options.setDescription("d");
+attributes.put("key.acl.name", "k5");
 options.setAttributes(attributes);
 kp.createKey("k5", options);
 meta = kp.getMetadata("k5");
@@ -564,6 +585,201 @@ public class TestKMS {
 });
 }
@Test
public void testKeyACLs() throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);
final File testDir = getTestDir();
conf = createBaseKMSConf(testDir);
conf.set("hadoop.kms.authentication.type", "kerberos");
conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
for (KMSACLs.Type type : KMSACLs.Type.values()) {
conf.set(type.getAclConfigKey(), type.toString());
}
conf.set(KMSACLs.Type.CREATE.getAclConfigKey(),"CREATE,ROLLOVER,GET,SET_KEY_MATERIAL,GENERATE_EEK");
conf.set(KMSACLs.Type.ROLLOVER.getAclConfigKey(),"CREATE,ROLLOVER,GET,SET_KEY_MATERIAL,GENERATE_EEK");
conf.set(KMSACLs.Type.GENERATE_EEK.getAclConfigKey(),"CREATE,ROLLOVER,GET,SET_KEY_MATERIAL,GENERATE_EEK");
conf.set(KMSACLs.Type.DECRYPT_EEK.getAclConfigKey(),"CREATE,ROLLOVER,GET,SET_KEY_MATERIAL,GENERATE_EEK");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "test_key.MANAGEMENT", "CREATE");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "all_access.ALL", "GENERATE_EEK");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "all_access.DECRYPT_EEK", "ROLLOVER");
conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "MANAGEMENT", "ROLLOVER");
writeConf(testDir, conf);
runServer(null, null, testDir, new KMSCallable() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
final URI uri = createKMSUri(getKMSUrl());
doAs("CREATE", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = new KMSClientProvider(uri, conf);
try {
Options options = new KeyProvider.Options(conf);
Map<String, String> attributes = options.getAttributes();
HashMap<String,String> newAttribs = new HashMap<String, String>(attributes);
newAttribs.put("key.acl.name", "test_key");
options.setAttributes(newAttribs);
KeyProvider.KeyVersion kv = kp.createKey("k0", options);
Assert.assertNull(kv.getMaterial());
KeyVersion rollVersion = kp.rollNewVersion("k0");
Assert.assertNull(rollVersion.getMaterial());
KeyProviderCryptoExtension kpce =
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
try {
kpce.generateEncryptedKey("k0");
Assert.fail("User [CREATE] should not be allowed to generate_eek on k0");
} catch (Exception e) {
// Ignore
}
newAttribs = new HashMap<String, String>(attributes);
newAttribs.put("key.acl.name", "all_access");
options.setAttributes(newAttribs);
try {
kp.createKey("kx", options);
Assert.fail("User [CREATE] should not be allowed to create kx");
} catch (Exception e) {
// Ignore
}
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("ROLLOVER", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = new KMSClientProvider(uri, conf);
try {
Options options = new KeyProvider.Options(conf);
Map<String, String> attributes = options.getAttributes();
HashMap<String,String> newAttribs = new HashMap<String, String>(attributes);
newAttribs.put("key.acl.name", "test_key2");
options.setAttributes(newAttribs);
KeyProvider.KeyVersion kv = kp.createKey("k1", options);
Assert.assertNull(kv.getMaterial());
KeyVersion rollVersion = kp.rollNewVersion("k1");
Assert.assertNull(rollVersion.getMaterial());
try {
kp.rollNewVersion("k0");
Assert.fail("User [ROLLOVER] should not be allowed to rollover k0");
} catch (Exception e) {
// Ignore
}
KeyProviderCryptoExtension kpce =
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
try {
kpce.generateEncryptedKey("k1");
Assert.fail("User [ROLLOVER] should not be allowed to generate_eek on k1");
} catch (Exception e) {
// Ignore
}
newAttribs = new HashMap<String, String>(attributes);
newAttribs.put("key.acl.name", "all_access");
options.setAttributes(newAttribs);
try {
kp.createKey("kx", options);
Assert.fail("User [ROLLOVER] should not be allowed to create kx");
} catch (Exception e) {
// Ignore
}
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("GET", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = new KMSClientProvider(uri, conf);
try {
Options options = new KeyProvider.Options(conf);
Map<String, String> attributes = options.getAttributes();
HashMap<String,String> newAttribs = new HashMap<String, String>(attributes);
newAttribs.put("key.acl.name", "test_key");
options.setAttributes(newAttribs);
try {
kp.createKey("k2", options);
Assert.fail("User [GET] should not be allowed to create key..");
} catch (Exception e) {
// Ignore
}
newAttribs = new HashMap<String, String>(attributes);
newAttribs.put("key.acl.name", "all_access");
options.setAttributes(newAttribs);
try {
kp.createKey("kx", options);
Assert.fail("User [GET] should not be allowed to create kx");
} catch (Exception e) {
// Ignore
}
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
final EncryptedKeyVersion ekv = doAs("GENERATE_EEK", new PrivilegedExceptionAction<EncryptedKeyVersion>() {
@Override
public EncryptedKeyVersion run() throws Exception {
KeyProvider kp = new KMSClientProvider(uri, conf);
try {
Options options = new KeyProvider.Options(conf);
Map<String, String> attributes = options.getAttributes();
HashMap<String,String> newAttribs = new HashMap<String, String>(attributes);
newAttribs.put("key.acl.name", "all_access");
options.setAttributes(newAttribs);
kp.createKey("kx", options);
KeyProviderCryptoExtension kpce =
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
try {
return kpce.generateEncryptedKey("kx");
} catch (Exception e) {
Assert.fail("User [GENERATE_EEK] should be allowed to generate_eek on kx");
}
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("ROLLOVER", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = new KMSClientProvider(uri, conf);
try {
KeyProviderCryptoExtension kpce =
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
kpce.decryptEncryptedKey(ekv);
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
return null;
}
});
}
 @Test
 public void testACLs() throws Exception {
 Configuration conf = new Configuration();
@@ -586,6 +802,9 @@ public class TestKMS {
 conf.set(KMSACLs.Type.ROLLOVER.getAclConfigKey(),
 KMSACLs.Type.ROLLOVER.toString() + ",SET_KEY_MATERIAL");
+conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k0.ALL", "*");
+conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k1.ALL", "*");
 writeConf(testDir, conf);
 runServer(null, null, testDir, new KMSCallable() {
@@ -891,6 +1110,9 @@ public class TestKMS {
 conf.set(KMSACLs.Type.DECRYPT_EEK.getAclConfigKey(), "client,hdfs,otheradmin");
 conf.set(KMSACLs.Type.DECRYPT_EEK.getBlacklistConfigKey(), "hdfs,otheradmin");
+conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "ck0.ALL", "*");
+conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "ck1.ALL", "*");
 writeConf(testDir, conf);
 runServer(null, null, testDir, new KMSCallable() {
@@ -973,6 +1195,7 @@ public class TestKMS {
 conf.set(type.getAclConfigKey(), " ");
 }
 conf.set(KMSACLs.Type.CREATE.getAclConfigKey(), "client");
+conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "MANAGEMENT", "client,client/host");
 writeConf(testDir, conf);
@@ -1096,6 +1319,9 @@ public class TestKMS {
 conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
 conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
+conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "kA.ALL", "*");
+conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "kD.ALL", "*");
 writeConf(testDir, conf);
 runServer(null, null, testDir, new KMSCallable() {
@@ -1164,6 +1390,10 @@ public class TestKMS {
 conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
 conf.set("hadoop.kms.proxyuser.client.users", "foo");
 conf.set("hadoop.kms.proxyuser.client.hosts", "*");
+conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "kAA.ALL", "*");
+conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "kBB.ALL", "*");
+conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "kCC.ALL", "*");
 writeConf(testDir, conf);
 runServer(null, null, testDir, new KMSCallable() {

View File

@@ -0,0 +1,218 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
import java.security.SecureRandom;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
import org.apache.hadoop.crypto.key.KeyProvider.Options;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.crypto.key.UserProvider;
import org.apache.hadoop.crypto.key.kms.server.KeyAuthorizationKeyProvider.KeyACLs;
import org.apache.hadoop.crypto.key.kms.server.KeyAuthorizationKeyProvider.KeyOpType;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.Assert;
import org.junit.Test;
public class TestKeyAuthorizationKeyProvider {
private static final String CIPHER = "AES";
@Test
public void testCreateKey() throws Exception {
final Configuration conf = new Configuration();
KeyProvider kp =
new UserProvider.Factory().createProvider(new URI("user:///"), conf);
KeyACLs mock = mock(KeyACLs.class);
when(mock.isACLPresent("foo", KeyOpType.MANAGEMENT)).thenReturn(true);
UserGroupInformation u1 = UserGroupInformation.createRemoteUser("u1");
when(mock.hasAccessToKey("foo", u1, KeyOpType.MANAGEMENT)).thenReturn(true);
final KeyProviderCryptoExtension kpExt =
new KeyAuthorizationKeyProvider(
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp),
mock);
u1.doAs(
new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
kpExt.createKey("foo", SecureRandom.getSeed(16),
newOptions(conf));
} catch (IOException ioe) {
Assert.fail("User should be Authorized !!");
}
// "bar" key not configured
try {
kpExt.createKey("bar", SecureRandom.getSeed(16),
newOptions(conf));
Assert.fail("User should NOT be Authorized !!");
} catch (IOException ioe) {
// Ignore
}
return null;
}
}
);
// Unauthorized User
UserGroupInformation.createRemoteUser("badGuy").doAs(
new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
kpExt.createKey("foo", SecureRandom.getSeed(16),
newOptions(conf));
Assert.fail("User should NOT be Authorized !!");
} catch (IOException ioe) {
// Ignore
}
return null;
}
}
);
}
@Test
public void testOpsWhenACLAttributeExists() throws Exception {
final Configuration conf = new Configuration();
KeyProvider kp =
new UserProvider.Factory().createProvider(new URI("user:///"), conf);
KeyACLs mock = mock(KeyACLs.class);
when(mock.isACLPresent("testKey", KeyOpType.MANAGEMENT)).thenReturn(true);
when(mock.isACLPresent("testKey", KeyOpType.GENERATE_EEK)).thenReturn(true);
when(mock.isACLPresent("testKey", KeyOpType.DECRYPT_EEK)).thenReturn(true);
when(mock.isACLPresent("testKey", KeyOpType.ALL)).thenReturn(true);
UserGroupInformation u1 = UserGroupInformation.createRemoteUser("u1");
UserGroupInformation u2 = UserGroupInformation.createRemoteUser("u2");
UserGroupInformation u3 = UserGroupInformation.createRemoteUser("u3");
UserGroupInformation sudo = UserGroupInformation.createRemoteUser("sudo");
when(mock.hasAccessToKey("testKey", u1, KeyOpType.MANAGEMENT)).thenReturn(true);
when(mock.hasAccessToKey("testKey", u2, KeyOpType.GENERATE_EEK)).thenReturn(true);
when(mock.hasAccessToKey("testKey", u3, KeyOpType.DECRYPT_EEK)).thenReturn(true);
when(mock.hasAccessToKey("testKey", sudo, KeyOpType.ALL)).thenReturn(true);
final KeyProviderCryptoExtension kpExt =
new KeyAuthorizationKeyProvider(
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp),
mock);
final KeyVersion barKv = u1.doAs(
new PrivilegedExceptionAction<KeyVersion>() {
@Override
public KeyVersion run() throws Exception {
Options opt = newOptions(conf);
Map<String, String> m = new HashMap<String, String>();
m.put("key.acl.name", "testKey");
opt.setAttributes(m);
try {
KeyVersion kv =
kpExt.createKey("foo", SecureRandom.getSeed(16), opt);
kpExt.rollNewVersion(kv.getName());
kpExt.rollNewVersion(kv.getName(), SecureRandom.getSeed(16));
kpExt.deleteKey(kv.getName());
} catch (IOException ioe) {
Assert.fail("User should be Authorized !!");
}
KeyVersion retkv = null;
try {
retkv = kpExt.createKey("bar", SecureRandom.getSeed(16), opt);
kpExt.generateEncryptedKey(retkv.getName());
Assert.fail("User should NOT be Authorized to generate EEK !!");
} catch (IOException ioe) {
}
Assert.assertNotNull(retkv);
return retkv;
}
}
);
final EncryptedKeyVersion barEKv =
u2.doAs(
new PrivilegedExceptionAction<EncryptedKeyVersion>() {
@Override
public EncryptedKeyVersion run() throws Exception {
try {
kpExt.deleteKey(barKv.getName());
Assert.fail("User should NOT be Authorized to "
+ "perform any other operation !!");
} catch (IOException ioe) {
}
return kpExt.generateEncryptedKey(barKv.getName());
}
});
u3.doAs(
new PrivilegedExceptionAction<KeyVersion>() {
@Override
public KeyVersion run() throws Exception {
try {
kpExt.deleteKey(barKv.getName());
Assert.fail("User should NOT be Authorized to "
+ "perform any other operation !!");
} catch (IOException ioe) {
}
return kpExt.decryptEncryptedKey(barEKv);
}
});
sudo.doAs(
new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
Options opt = newOptions(conf);
Map<String, String> m = new HashMap<String, String>();
m.put("key.acl.name", "testKey");
opt.setAttributes(m);
try {
KeyVersion kv =
kpExt.createKey("foo", SecureRandom.getSeed(16), opt);
kpExt.rollNewVersion(kv.getName());
kpExt.rollNewVersion(kv.getName(), SecureRandom.getSeed(16));
EncryptedKeyVersion ekv = kpExt.generateEncryptedKey(kv.getName());
kpExt.decryptEncryptedKey(ekv);
kpExt.deleteKey(kv.getName());
} catch (IOException ioe) {
Assert.fail("User should be Allowed to do everything !!");
}
return null;
}
}
);
}
private static KeyProvider.Options newOptions(Configuration conf) {
KeyProvider.Options options = new KeyProvider.Options(conf);
options.setCipher(CIPHER);
options.setBitLength(128);
return options;
}
}

View File

@ -635,6 +635,15 @@ Release 2.6.0 - UNRELEASED
HDFS-6506. Newly moved block replica been invalidated and deleted in
TestBalancer. (Binglin Chang via cnauroth)
HDFS-6966. Add additional unit tests for encryption zones.
(Stephen Chu via wang)
HDFS-6621. Hadoop Balancer prematurely exits iterations.
(Rafal Wojdyla and Benjamin Bowman via wang)
HDFS-7045. Fix NameNode deadlock when opening file under /.reserved path.
(Yi Liu via wang)
BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
HDFS-6387. HDFS CLI admin tool for creating & deleting an
@ -739,7 +748,16 @@ Release 2.6.0 - UNRELEASED
HDFS-6986. DistributedFileSystem must get delegation tokens from configured
KeyProvider. (zhz via tucu)
HDFS-6776. Using distcp to copy data between insecure and secure cluster via webhdfs
doesn't work. (yzhangal via tucu)
HDFS-7042. Upgrade fails for Windows HA cluster due to file locks held during
rename in JournalNode. (cnauroth)
HDFS-7051. TestDataNodeRollingUpgrade#isBlockFileInPrevious assumes Unix file
path separator. (cnauroth)
Release 2.5.1 - 2014-09-05
INCOMPATIBLE CHANGES

View File

@ -988,6 +988,9 @@ public class Journal implements Closeable {
}
public synchronized void doPreUpgrade() throws IOException {
// Do not hold file lock on committedTxnId, because the containing
// directory will be renamed. It will be reopened lazily on next access.
committedTxnId.close();
storage.getJournalManager().doPreUpgrade();
}
@ -1037,7 +1040,10 @@ public class Journal implements Closeable {
targetLayoutVersion);
}
public synchronized void doRollback() throws IOException {
// Do not hold file lock on committedTxnId, because the containing
// directory will be renamed. It will be reopened lazily on next access.
committedTxnId.close();
storage.getJournalManager().doRollback();
}

View File

@ -402,8 +402,7 @@ public class DelegationTokenSecretManager
final Token<DelegationTokenIdentifier> token = namenode.getRpcServer(
).getDelegationToken(new Text(renewer));
if (token == null) {
return null;
}
final InetSocketAddress addr = namenode.getNameNodeAddress();

View File

@ -654,6 +654,8 @@ public class Dispatcher {
&& (!srcBlocks.isEmpty() || blocksToReceive > 0)) {
final PendingMove p = chooseNextMove();
if (p != null) {
// Reset no pending move counter
noPendingMoveIteration = 0;
// move the block
moveExecutor.execute(new Runnable() {
@Override

View File

@ -1853,8 +1853,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
checkOperation(OperationCategory.WRITE);
writeLock(); // writelock is needed to set accesstime
}
try {
src = resolvePath(src, pathComponents);
if (isReadOp) {
checkOperation(OperationCategory.READ);
} else {

View File

@ -283,6 +283,9 @@ public class NamenodeWebHdfsMethods {
final String renewer) throws IOException {
final Credentials c = DelegationTokenSecretManager.createCredentials(
namenode, ugi, renewer != null? renewer: ugi.getShortUserName());
if (c == null) {
return null;
}
final Token<? extends TokenIdentifier> t = c.getAllTokens().iterator().next();
Text kind = request.getScheme().equals("http") ? WebHdfsFileSystem.TOKEN_KIND
: SWebHdfsFileSystem.TOKEN_KIND;

View File

@ -112,6 +112,7 @@ public class BestEffortLongFile implements Closeable {
public void close() throws IOException {
if (ch != null) {
ch.close();
ch = null;
}
}
}

View File

@ -41,6 +41,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.DelegationTokenRenewer;
import org.apache.hadoop.fs.FSDataInputStream;
@ -102,6 +103,11 @@ public class WebHdfsFileSystem extends FileSystem
/** Delegation token kind */
public static final Text TOKEN_KIND = new Text("WEBHDFS delegation");
@VisibleForTesting
public static final String CANT_FALLBACK_TO_INSECURE_MSG =
"The client is configured to only allow connecting to secure cluster";
private boolean canRefreshDelegationToken;
private UserGroupInformation ugi;
@ -112,6 +118,7 @@ public class WebHdfsFileSystem extends FileSystem
private Path workingDir;
private InetSocketAddress nnAddrs[];
private int currentNNAddrIndex;
private boolean disallowFallbackToInsecureCluster;
/**
* Return the protocol scheme for the FileSystem.
@ -194,6 +201,9 @@ public class WebHdfsFileSystem extends FileSystem
this.workingDir = getHomeDirectory();
this.canRefreshDelegationToken = UserGroupInformation.isSecurityEnabled();
this.disallowFallbackToInsecureCluster = !conf.getBoolean(
CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT);
this.delegationToken = null;
}
@ -1293,7 +1303,13 @@ public class WebHdfsFileSystem extends FileSystem
return JsonUtil.toDelegationToken(json);
}
}.run();
if (token != null) {
token.setService(tokenServiceName);
} else {
if (disallowFallbackToInsecureCluster) {
throw new AccessControlException(CANT_FALLBACK_TO_INSECURE_MSG);
}
}
return token;
}
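The new field makes token fetching strict by default: on an insecure cluster, getDelegationToken only returns null when the client has opted in to simple-auth fallback, and otherwise throws. A minimal client-side sketch of the opt-in, assuming an insecure cluster (the NameNode address is a placeholder; the key is the one behind IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY above):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class WebHdfsFallbackSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Opt in to insecure fallback; without this, an insecure cluster now
    // yields AccessControlException(CANT_FALLBACK_TO_INSECURE_MSG).
    conf.setBoolean("ipc.client.fallback-to-simple-auth-allowed", true);
    FileSystem fs = FileSystem.get(
        URI.create("webhdfs://namenode:50070/"), conf); // hypothetical endpoint
    System.out.println("token = " + fs.getDelegationToken(null)); // null when insecure
  }
}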

View File

@ -1378,7 +1378,8 @@ public class DFSTestUtil {
}
/**
* Helper function to create a key in the Key Provider. Defaults
* to the first indexed NameNode's Key Provider.
*
* @param keyName The name of the key to create
* @param cluster The cluster to create it in
@ -1387,7 +1388,22 @@ public class DFSTestUtil {
public static void createKey(String keyName, MiniDFSCluster cluster,
Configuration conf)
throws NoSuchAlgorithmException, IOException {
createKey(keyName, cluster, 0, conf);
}
/**
* Helper function to create a key in the Key Provider.
*
* @param keyName The name of the key to create
* @param cluster The cluster to create it in
* @param idx The NameNode index
* @param conf Configuration to use
*/
public static void createKey(String keyName, MiniDFSCluster cluster,
int idx, Configuration conf)
throws NoSuchAlgorithmException, IOException {
NameNode nn = cluster.getNameNode(idx);
KeyProvider provider = nn.getNamesystem().getProvider();
final KeyProvider.Options options = KeyProvider.options(conf);
options.setDescription(keyName);
options.setBitLength(128);

View File

@ -17,8 +17,14 @@
*/
package org.apache.hadoop.hdfs;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.io.RandomAccessFile;
import java.io.StringReader;
import java.io.StringWriter;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import java.util.List;
@ -52,10 +58,15 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.namenode.EncryptionFaultInjector;
import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension.DelegationTokenExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
import org.apache.hadoop.io.Text;
@ -78,6 +89,11 @@ import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import org.xml.sax.InputSource;
import org.xml.sax.helpers.DefaultHandler;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
public class TestEncryptionZones {
@ -374,6 +390,7 @@ public class TestEncryptionZones {
final Path allPathFile = new Path(allPath, "file1");
final Path nonEZDir = new Path(testRoot, "nonEZDir");
final Path nonEZFile = new Path(nonEZDir, "file1");
final Path nonexistent = new Path("/nonexistent");
final int len = 8192;
fsWrapper.mkdir(testRoot, new FsPermission((short) 0777), true);
@ -421,6 +438,9 @@ public class TestEncryptionZones {
assertExceptionContains("Permission denied:", e);
}
assertNull("expected null for nonexistent path",
userAdmin.getEncryptionZoneForPath(nonexistent));
// Check operation with non-ez paths
assertNull("expected null for non-ez path",
userAdmin.getEncryptionZoneForPath(nonEZDir));
@ -463,9 +483,15 @@ public class TestEncryptionZones {
final Path testRoot = new Path("/tmp/TestEncryptionZones");
final Path pathFoo = new Path(testRoot, "foo");
final Path pathFooBaz = new Path(pathFoo, "baz");
final Path pathFooBazFile = new Path(pathFooBaz, "file");
final Path pathFooBar = new Path(pathFoo, "bar");
final Path pathFooBarFile = new Path(pathFooBar, "file");
final int len = 8192;
wrapper.mkdir(pathFoo, FsPermission.getDirDefault(), true);
dfsAdmin.createEncryptionZone(pathFoo, TEST_KEY);
wrapper.mkdir(pathFooBaz, FsPermission.getDirDefault(), true);
DFSTestUtil.createFile(fs, pathFooBazFile, len, (short) 1, 0xFEED);
String contents = DFSTestUtil.readFile(fs, pathFooBazFile);
try {
wrapper.rename(pathFooBaz, testRoot);
} catch (IOException e) {
@ -473,6 +499,13 @@ public class TestEncryptionZones {
" an encryption zone.", e " an encryption zone.", e
); );
} }
// Verify that we can rename dir and files within an encryption zone.
assertTrue(fs.rename(pathFooBaz, pathFooBar));
assertTrue("Rename of dir and file within ez failed",
!wrapper.exists(pathFooBaz) && wrapper.exists(pathFooBar));
assertEquals("Renamed file contents not the same",
contents, DFSTestUtil.readFile(fs, pathFooBarFile));
}
@Test(timeout = 60000)
@ -806,4 +839,158 @@ public class TestEncryptionZones {
Assert.assertEquals(tokens[1], testToken);
Assert.assertEquals(1, creds.numberOfTokens());
}
/**
* Test running fsck on a system with encryption zones.
*/
@Test(timeout = 60000)
public void testFsckOnEncryptionZones() throws Exception {
final int len = 8196;
final Path zoneParent = new Path("/zones");
final Path zone1 = new Path(zoneParent, "zone1");
final Path zone1File = new Path(zone1, "file");
fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
DFSTestUtil.createFile(fs, zone1File, len, (short) 1, 0xFEED);
ByteArrayOutputStream bStream = new ByteArrayOutputStream();
PrintStream out = new PrintStream(bStream, true);
int errCode = ToolRunner.run(new DFSck(conf, out),
new String[]{ "/" });
assertEquals("Fsck ran with non-zero error code", 0, errCode);
String result = bStream.toString();
assertTrue("Fsck did not return HEALTHY status",
result.contains(NamenodeFsck.HEALTHY_STATUS));
// Run fsck directly on the encryption zone instead of root
errCode = ToolRunner.run(new DFSck(conf, out),
new String[]{ zoneParent.toString() });
assertEquals("Fsck ran with non-zero error code", 0, errCode);
result = bStream.toString();
assertTrue("Fsck did not return HEALTHY status",
result.contains(NamenodeFsck.HEALTHY_STATUS));
}
/**
* Test correctness of successive snapshot creation and deletion
* on a system with encryption zones.
*/
@Test(timeout = 60000)
public void testSnapshotsOnEncryptionZones() throws Exception {
final int len = 8196;
final Path zoneParent = new Path("/zones");
final Path zone = new Path(zoneParent, "zone");
final Path zoneFile = new Path(zone, "zoneFile");
fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
dfsAdmin.allowSnapshot(zoneParent);
dfsAdmin.createEncryptionZone(zone, TEST_KEY);
DFSTestUtil.createFile(fs, zoneFile, len, (short) 1, 0xFEED);
String contents = DFSTestUtil.readFile(fs, zoneFile);
final Path snap1 = fs.createSnapshot(zoneParent);
final Path snap1Zone = new Path(snap1, zone.getName());
assertEquals("Got unexpected ez path", zone.toString(),
dfsAdmin.getEncryptionZoneForPath(snap1Zone).getPath().toString());
// Now delete the encryption zone, recreate the dir, and take another snapshot
fsWrapper.delete(zone, true);
fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
final Path snap2 = fs.createSnapshot(zoneParent);
final Path snap2Zone = new Path(snap2, zone.getName());
assertNull("Expected null ez path",
dfsAdmin.getEncryptionZoneForPath(snap2Zone));
// Create the encryption zone again
dfsAdmin.createEncryptionZone(zone, TEST_KEY);
final Path snap3 = fs.createSnapshot(zoneParent);
final Path snap3Zone = new Path(snap3, zone.getName());
assertEquals("Got unexpected ez path", zone.toString(),
dfsAdmin.getEncryptionZoneForPath(snap3Zone).getPath().toString());
// Verify contents of the snapshotted file
final Path snapshottedZoneFile = new Path(
snap1.toString() + "/" + zone.getName() + "/" + zoneFile.getName());
assertEquals("Contents of snapshotted file have changed unexpectedly",
contents, DFSTestUtil.readFile(fs, snapshottedZoneFile));
// Now delete the snapshots out of order and verify the zones are still correct
fs.deleteSnapshot(zoneParent, snap2.getName());
assertEquals("Got unexpected ez path", zone.toString(),
dfsAdmin.getEncryptionZoneForPath(snap1Zone).getPath().toString());
assertEquals("Got unexpected ez path", zone.toString(),
dfsAdmin.getEncryptionZoneForPath(snap3Zone).getPath().toString());
fs.deleteSnapshot(zoneParent, snap1.getName());
assertEquals("Got unexpected ez path", zone.toString(),
dfsAdmin.getEncryptionZoneForPath(snap3Zone).getPath().toString());
}
/**
* Verify symlinks can be created in encryption zones and that
* they function properly when the target is in the same
* or different ez.
*/
@Test(timeout = 60000)
public void testEncryptionZonesWithSymlinks() throws Exception {
// Verify we can create an encryption zone over both link and target
final int len = 8192;
final Path parent = new Path("/parent");
final Path linkParent = new Path(parent, "symdir1");
final Path targetParent = new Path(parent, "symdir2");
final Path link = new Path(linkParent, "link");
final Path target = new Path(targetParent, "target");
fs.mkdirs(parent);
dfsAdmin.createEncryptionZone(parent, TEST_KEY);
fs.mkdirs(linkParent);
fs.mkdirs(targetParent);
DFSTestUtil.createFile(fs, target, len, (short)1, 0xFEED);
String content = DFSTestUtil.readFile(fs, target);
fs.createSymlink(target, link, false);
assertEquals("Contents read from link are not the same as target",
content, DFSTestUtil.readFile(fs, link));
fs.delete(parent, true);
// Now let's test when the symlink and target are in different
// encryption zones
fs.mkdirs(linkParent);
fs.mkdirs(targetParent);
dfsAdmin.createEncryptionZone(linkParent, TEST_KEY);
dfsAdmin.createEncryptionZone(targetParent, TEST_KEY);
DFSTestUtil.createFile(fs, target, len, (short)1, 0xFEED);
content = DFSTestUtil.readFile(fs, target);
fs.createSymlink(target, link, false);
assertEquals("Contents read from link are not the same as target",
content, DFSTestUtil.readFile(fs, link));
fs.delete(link, true);
fs.delete(target, true);
}
/**
* Test running the OfflineImageViewer on a system with encryption zones.
*/
@Test(timeout = 60000)
public void testOfflineImageViewerOnEncryptionZones() throws Exception {
final int len = 8196;
final Path zoneParent = new Path("/zones");
final Path zone1 = new Path(zoneParent, "zone1");
final Path zone1File = new Path(zone1, "file");
fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
DFSTestUtil.createFile(fs, zone1File, len, (short) 1, 0xFEED);
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
fs.saveNamespace();
File originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
.getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
if (originalFsimage == null) {
throw new RuntimeException("Didn't generate or can't find fsimage");
}
// Run the XML OIV processor
StringWriter output = new StringWriter();
PrintWriter pw = new PrintWriter(output);
PBImageXmlWriter v = new PBImageXmlWriter(new Configuration(), pw);
v.visit(new RandomAccessFile(originalFsimage, "r"));
final String xml = output.getBuffer().toString();
SAXParser parser = SAXParserFactory.newInstance().newSAXParser();
parser.parse(new InputSource(new StringReader(xml)), new DefaultHandler());
}
}

View File

@ -0,0 +1,121 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderFactory;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
/**
* Tests interaction of encryption zones with HA failover.
*/
public class TestEncryptionZonesWithHA {
private Configuration conf;
private MiniDFSCluster cluster;
private NameNode nn0;
private NameNode nn1;
private DistributedFileSystem fs;
private HdfsAdmin dfsAdmin0;
private HdfsAdmin dfsAdmin1;
private FileSystemTestHelper fsHelper;
private File testRootDir;
private final String TEST_KEY = "testKey";
@Before
public void setupCluster() throws Exception {
conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
HAUtil.setAllowStandbyReads(conf, true);
fsHelper = new FileSystemTestHelper();
String testRoot = fsHelper.getTestRootDir();
testRootDir = new File(testRoot).getAbsoluteFile();
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
JavaKeyStoreProvider.SCHEME_NAME + "://file" + testRootDir + "/test.jks"
);
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleHATopology())
.numDataNodes(1)
.build();
cluster.waitActive();
cluster.transitionToActive(0);
fs = (DistributedFileSystem)HATestUtil.configureFailoverFs(cluster, conf);
DFSTestUtil.createKey(TEST_KEY, cluster, 0, conf);
DFSTestUtil.createKey(TEST_KEY, cluster, 1, conf);
nn0 = cluster.getNameNode(0);
nn1 = cluster.getNameNode(1);
dfsAdmin0 = new HdfsAdmin(cluster.getURI(0), conf);
dfsAdmin1 = new HdfsAdmin(cluster.getURI(1), conf);
KeyProviderCryptoExtension nn0Provider =
cluster.getNameNode(0).getNamesystem().getProvider();
fs.getClient().provider = nn0Provider;
}
@After
public void shutdownCluster() throws IOException {
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Test that encryption zones are properly tracked by the standby.
*/
@Test(timeout = 60000)
public void testEncryptionZonesTrackedOnStandby() throws Exception {
final int len = 8196;
final Path dir = new Path("/enc");
final Path dirChild = new Path(dir, "child");
final Path dirFile = new Path(dir, "file");
fs.mkdir(dir, FsPermission.getDirDefault());
dfsAdmin0.createEncryptionZone(dir, TEST_KEY);
fs.mkdir(dirChild, FsPermission.getDirDefault());
DFSTestUtil.createFile(fs, dirFile, len, (short) 1, 0xFEED);
String contents = DFSTestUtil.readFile(fs, dirFile);
// Failover the current standby to active.
HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
Assert.assertEquals("Got unexpected ez path", dir.toString(),
dfsAdmin1.getEncryptionZoneForPath(dir).getPath().toString());
Assert.assertEquals("Got unexpected ez path", dir.toString(),
dfsAdmin1.getEncryptionZoneForPath(dirChild).getPath().toString());
Assert.assertEquals("File contents after failover were changed",
contents, DFSTestUtil.readFile(fs, dirFile));
}
}

View File

@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.ByteBuffer;
@ -80,4 +81,25 @@ public class TestRead {
testEOF(cluster, 10000);
cluster.shutdown();
}
/**
* Regression test for HDFS-7045.
* If deadlock happen, the test will time out.
* @throws Exception
*/
@Test(timeout=60000)
public void testReadReservedPath() throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
numDataNodes(1).format(true).build();
try {
FileSystem fs = cluster.getFileSystem();
fs.open(new Path("/.reserved/.inodes/file"));
Assert.fail("Open a non existing file should fail.");
} catch (FileNotFoundException e) {
// Expected
} finally {
cluster.shutdown();
}
}
}

View File

@ -157,7 +157,9 @@ public class TestDataNodeRollingUpgrade {
}
private boolean isBlockFileInPrevious(File blockFile) {
Pattern blockFilePattern = Pattern.compile(String.format(
"^(.*%1$scurrent%1$s.*%1$s)(current)(%1$s.*)$",
Pattern.quote(File.separator)));
Matcher matcher = blockFilePattern.matcher(blockFile.toString());
String previousFileName = matcher.replaceFirst("$1" + "previous" + "$3");
return ((new File(previousFileName)).exists());
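Pattern.quote(File.separator) is what makes the rewritten regex portable: on Windows the separator is a backslash, which unquoted would be read as a broken regex escape. A small standalone sketch of the idiom on a hypothetical block path:

import java.io.File;
import java.util.regex.Pattern;

public class SeparatorRegexSketch {
  public static void main(String[] args) {
    // %1$s expands to the quoted separator ("/" on Unix, "\" on Windows).
    Pattern p = Pattern.compile(String.format(
        "^(.*%1$scurrent%1$s.*%1$s)(current)(%1$s.*)$",
        Pattern.quote(File.separator)));
    String blockFile = "data" + File.separator + "current" + File.separator
        + "BP-1" + File.separator + "current" + File.separator + "blk_1001";
    // Prints data/current/BP-1/previous/blk_1001 (with platform separators).
    System.out.println(p.matcher(blockFile).replaceFirst("$1previous$3"));
  }
}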
@ -355,7 +357,7 @@ public class TestDataNodeRollingUpgrade {
assertTrue(isBlockFileInPrevious(blockFiles[1]));
assertFalse(isTrashRootPresent());
// Finalize and ensure that neither block file exists in trash or previous.
finalizeRollingUpgrade();
assertFalse(isTrashRootPresent());
assertFalse(isBlockFileInPrevious(blockFiles[0]));

View File

@ -29,6 +29,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
@ -45,6 +46,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
@ -482,4 +484,43 @@ public class TestWebHDFS {
}
}
}
@Test
public void testDTInInsecureClusterWithFallback()
throws IOException, URISyntaxException {
MiniDFSCluster cluster = null;
final Configuration conf = WebHdfsTestUtil.createConf();
conf.setBoolean(CommonConfigurationKeys
.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY, true);
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
WebHdfsFileSystem.SCHEME);
Assert.assertNull(webHdfs.getDelegationToken(null));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testDTInInsecureCluster() throws Exception {
MiniDFSCluster cluster = null;
final Configuration conf = WebHdfsTestUtil.createConf();
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
WebHdfsFileSystem.SCHEME);
webHdfs.getDelegationToken(null);
fail("No exception is thrown.");
} catch (AccessControlException ace) {
Assert.assertTrue(ace.getMessage().startsWith(
WebHdfsFileSystem.CANT_FALLBACK_TO_INSECURE_MSG));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
}

View File

@ -276,7 +276,13 @@ Release 2.6.0 - UNRELEASED
MAPREDUCE-6071. JobImpl#makeUberDecision doesn't log that Uber mode is
disabled because of too much CPUs (Tsuyoshi OZAWA via jlowe)
MAPREDUCE-6075. HistoryServerFileSystemStateStore can create zero-length
files (jlowe)
MAPREDUCE-6070. yarn.app.am.resource.mb/cpu-vcores affects uber mode but
is not documented (Tsuyoshi OZAWA via jlowe)
Release 2.5.1 - 2014-09-05
INCOMPATIBLE CHANGES

View File

@ -194,7 +194,7 @@ public class CheckpointAMPreemptionPolicy implements AMPreemptionPolicy {
Collections.sort(listOfCont, new Comparator<Container>() {
@Override
public int compare(final Container o1, final Container o2) {
return o2.getId().compareTo(o1.getId());
}
});
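The subtraction-based comparator this replaces is unsafe once ids can differ by more than Integer.MAX_VALUE (and ContainerId moves to a long-backed id elsewhere in this commit): the difference wraps around and reports the wrong order. A tiny illustration with plain ints:

public class ComparatorOverflowSketch {
  public static void main(String[] args) {
    int a = Integer.MAX_VALUE, b = -1;
    // a - b overflows to Integer.MIN_VALUE, so a compares as "smaller".
    System.out.println(a - b);                 // -2147483648 (wrong sign)
    System.out.println(Integer.compare(a, b)); // 1 (correct)
  }
}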

View File

@ -508,7 +508,12 @@
<description>Whether to enable the small-jobs "ubertask" optimization,
which runs "sufficiently small" jobs sequentially within a single JVM.
"Small" is defined by the following maxmaps, maxreduces, and maxbytes
settings. Note that configurations for application masters also affect
the "Small" definition - yarn.app.mapreduce.am.resource.mb must be
larger than both mapreduce.map.memory.mb and mapreduce.reduce.memory.mb,
and yarn.app.mapreduce.am.resource.cpu-vcores must be larger than
both mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores to enable
ubertask. Users may override this value.
</description>
</property>
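Concretely, uber mode requires the AM container to be big enough to host the largest task. A sketch of a job configuration that satisfies the documented constraint (the values are illustrative, not defaults):

import org.apache.hadoop.conf.Configuration;

public class UberConfSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setBoolean("mapreduce.job.ubertask.enable", true);
    // The AM's memory and vcores must exceed every task's request,
    // because uberized tasks run inside the AM's own container.
    conf.setInt("mapreduce.map.memory.mb", 1024);
    conf.setInt("mapreduce.reduce.memory.mb", 1024);
    conf.setInt("yarn.app.mapreduce.am.resource.mb", 2048);
    conf.setInt("mapreduce.map.cpu.vcores", 1);
    conf.setInt("mapreduce.reduce.cpu.vcores", 1);
    conf.setInt("yarn.app.mapreduce.am.resource.cpu-vcores", 2);
  }
}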

View File

@ -189,6 +189,8 @@ public class HistoryServerFileSystemStateStoreService
DataOutputStream dataStream = new DataOutputStream(memStream);
try {
key.write(dataStream);
dataStream.close();
dataStream = null;
} finally {
IOUtils.cleanup(LOG, dataStream);
}
@ -260,6 +262,8 @@ public class HistoryServerFileSystemStateStoreService
try {
try {
out.write(data);
out.close();
out = null;
} finally {
IOUtils.cleanup(LOG, out);
}
@ -299,6 +303,8 @@ public class HistoryServerFileSystemStateStoreService
try {
tokenId.write(dataStream);
dataStream.writeLong(renewDate);
dataStream.close();
dataStream = null;
} finally {
IOUtils.cleanup(LOG, dataStream);
}
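The close-then-null pattern in these hunks is what prevents the zero-length files: close() runs on the success path so write/flush failures actually propagate, while IOUtils.cleanup in finally only quietly closes streams that are still open after a failure. A minimal sketch of the idiom (a generic stream, not the state store's code):

import java.io.IOException;
import java.io.OutputStream;
import org.apache.commons.logging.Log;
import org.apache.hadoop.io.IOUtils;

public class CloseThenNullSketch {
  static void writeAll(OutputStream out, byte[] data, Log log) throws IOException {
    try {
      out.write(data);
      out.close(); // success path: let close/flush failures propagate
      out = null;  // tell cleanup there is nothing left to close
    } finally {
      IOUtils.cleanup(log, out); // no-op on success, quiet close on failure
    }
  }
}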

View File

@ -327,6 +327,12 @@
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-aws</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
@ -576,6 +582,12 @@
<groupId>com.amazonaws</groupId>
<artifactId>aws-java-sdk</artifactId>
<version>1.7.2</version>
<exclusions>
<exclusion>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.mina</groupId>

View File

@ -15,5 +15,361 @@
limitations under the License.
-->
<FindBugsFilter>
<Match>
<Package name="org.apache.hadoop.security.proto" />
</Match>
<Match>
<Package name="org.apache.hadoop.tools.proto" />
</Match>
<Match>
<Bug pattern="EI_EXPOSE_REP" />
</Match>
<Match>
<Bug pattern="EI_EXPOSE_REP2" />
</Match>
<Match>
<Bug pattern="SE_COMPARATOR_SHOULD_BE_SERIALIZABLE" />
</Match>
<Match>
<Class name="~.*_jsp" />
<Bug pattern="DLS_DEAD_LOCAL_STORE" />
</Match>
<Match>
<Field name="_jspx_dependants" />
<Bug pattern="UWF_UNWRITTEN_FIELD" />
</Match>
<!--
Inconsistent synchronization for Client.Connection.out is
intentional to make a connection to be closed instantly.
-->
<Match>
<Class name="org.apache.hadoop.ipc.Client$Connection" />
<Field name="out" />
<Bug pattern="IS2_INCONSISTENT_SYNC" />
</Match>
<!--
Further SaslException should be ignored during cleanup and
original exception should be re-thrown.
-->
<Match>
<Class name="org.apache.hadoop.security.SaslRpcClient" />
<Bug pattern="DE_MIGHT_IGNORE" />
</Match>
<!--
Ignore Cross Scripting Vulnerabilities
-->
<Match>
<Package name="~org.apache.hadoop.mapred.*" />
<Bug code="XSS" />
</Match>
<Match>
<Class name="org.apache.hadoop.mapred.taskdetails_jsp" />
<Bug code="HRS" />
</Match>
<!--
Ignore warnings where child class has the same name as
super class. Classes based on Old API shadow names from
new API. Should go off after HADOOP-1.0
-->
<Match>
<Class name="~org.apache.hadoop.mapred.*" />
<Bug pattern="NM_SAME_SIMPLE_NAME_AS_SUPERCLASS" />
</Match>
<!--
Ignore warnings for usage of System.exit. This is
required and has been well thought out
-->
<Match>
<Class name="org.apache.hadoop.mapred.Child$2" />
<Method name="run" />
<Bug pattern="DM_EXIT" />
</Match>
<Match>
<Class name="org.apache.hadoop.mapred.JobTracker" />
<Method name="addHostToNodeMapping" />
<Bug pattern="DM_EXIT" />
</Match>
<Match>
<Class name="org.apache.hadoop.mapred.Task" />
<Or>
<Method name="done" />
<Method name="commit" />
<Method name="statusUpdate" />
</Or>
<Bug pattern="DM_EXIT" />
</Match>
<Match>
<Class name="org.apache.hadoop.mapred.Task$TaskReporter" />
<Method name="run" />
<Bug pattern="DM_EXIT" />
</Match>
<Match>
<Class name="org.apache.hadoop.util.ProgramDriver" />
<Method name="driver" />
<Bug pattern="DM_EXIT" />
</Match>
<Match>
<Class name="org.apache.hadoop.util.RunJar" />
<Method name="run" />
<Bug pattern="DM_EXIT" />
</Match>
<!--
We need to cast objects between old and new api objects
-->
<Match>
<Class name="org.apache.hadoop.mapred.OutputCommitter" />
<Bug pattern="BC_UNCONFIRMED_CAST" />
</Match>
<!--
We intentionally do the get name from the inner class
-->
<Match>
<Class name="org.apache.hadoop.mapred.TaskTracker$MapEventsFetcherThread" />
<Method name="run" />
<Bug pattern="IA_AMBIGUOUS_INVOCATION_OF_INHERITED_OR_OUTER_METHOD" />
</Match>
<Match>
<Class name="org.apache.hadoop.mapred.FileOutputCommitter" />
<Bug pattern="NM_WRONG_PACKAGE_INTENTIONAL" />
</Match>
<!--
Ignoring this warning as resolving this would need a non-trivial change in code
-->
<Match>
<Class name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor" />
<Method name="configure" />
<Field name="maxNumItems" />
<Bug pattern="ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD" />
</Match>
<!--
Comes from org.apache.jasper.runtime.ResourceInjector. Cannot do much.
-->
<Match>
<Class name="org.apache.hadoop.mapred.jobqueue_005fdetails_jsp" />
<Field name="_jspx_resourceInjector" />
<Bug pattern="SE_BAD_FIELD" />
</Match>
<!--
Storing textInputFormat and then passing it as a parameter. Safe to ignore.
-->
<Match>
<Class name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob" />
<Method name="createValueAggregatorJob" />
<Bug pattern="DLS_DEAD_STORE_OF_CLASS_LITERAL" />
</Match>
<!--
Can remove this after the upgrade to findbugs1.3.8
-->
<Match>
<Class name="org.apache.hadoop.mapred.lib.db.DBInputFormat" />
<Method name="getSplits" />
<Bug pattern="DLS_DEAD_LOCAL_STORE" />
</Match>
<!--
This is a spurious warning. Just ignore
-->
<Match>
<Class name="org.apache.hadoop.mapred.MapTask$MapOutputBuffer" />
<Field name="kvindex" />
<Bug pattern="IS2_INCONSISTENT_SYNC" />
</Match>
<!--
core changes
-->
<Match>
<Class name="~org.apache.hadoop.*" />
<Bug code="MS" />
</Match>
<Match>
<Class name="org.apache.hadoop.fs.FileSystem" />
<Method name="checkPath" />
<Bug pattern="ES_COMPARING_STRINGS_WITH_EQ" />
</Match>
<Match>
<Class name="org.apache.hadoop.io.Closeable" />
<Bug pattern="NM_SAME_SIMPLE_NAME_AS_INTERFACE" />
</Match>
<Match>
<Class name="org.apache.hadoop.security.AccessControlException" />
<Bug pattern="NM_SAME_SIMPLE_NAME_AS_SUPERCLASS" />
</Match>
<Match>
<Class name="org.apache.hadoop.util.ProcfsBasedProcessTree" />
<Bug pattern="DMI_HARDCODED_ABSOLUTE_FILENAME" />
</Match>
<!--
Streaming, Examples
-->
<Match>
<Class name="org.apache.hadoop.streaming.StreamUtil$TaskId" />
<Bug pattern="URF_UNREAD_FIELD" />
</Match>
<Match>
<Class name="org.apache.hadoop.examples.DBCountPageView" />
<Method name="verify" />
<Bug pattern="OBL_UNSATISFIED_OBLIGATION" />
</Match>
<Match>
<Class name="org.apache.hadoop.examples.ContextFactory" />
<Method name="setAttributes" />
<Bug pattern="OBL_UNSATISFIED_OBLIGATION" />
</Match>
<!--
TFile
-->
<Match>
<Class name="org.apache.hadoop.io.file.tfile.Chunk$ChunkDecoder" />
<Method name="close" />
<Bug pattern="SR_NOT_CHECKED" />
</Match>
<!--
The purpose of skip() is to drain remaining bytes of the chunk-encoded
stream (one chunk at a time). The termination condition is checked by
checkEOF().
-->
<Match>
<Class name="org.apache.hadoop.io.file.tfile.Utils" />
<Method name="writeVLong" />
<Bug pattern="SF_SWITCH_FALLTHROUGH" />
</Match>
<!--
The switch condition fall through is intentional and for performance
purposes.
-->
<Match>
<Class name="org.apache.hadoop.log.EventCounter"/>
<!-- backward compatibility -->
<Bug pattern="NM_SAME_SIMPLE_NAME_AS_SUPERCLASS"/>
</Match>
<Match>
<Class name="org.apache.hadoop.metrics.jvm.EventCounter"/>
<!-- backward compatibility -->
<Bug pattern="NM_SAME_SIMPLE_NAME_AS_SUPERCLASS"/>
</Match>
<Match>
<!-- protobuf generated code -->
<Class name="~org\.apache\.hadoop\.ipc\.protobuf\.ProtobufRpcEngineProtos.*"/>
</Match>
<Match>
<!-- protobuf generated code -->
<Class name="~org\.apache\.hadoop\.ipc\.protobuf\.ProtocolInfoProtos.*"/>
</Match>
<Match>
<!-- protobuf generated code -->
<Class name="~org\.apache\.hadoop\.ipc\.protobuf\.IpcConnectionContextProtos.*"/>
</Match>
<Match>
<!-- protobuf generated code -->
<Class name="~org\.apache\.hadoop\.ipc\.protobuf\.RpcHeaderProtos.*"/>
</Match>
<Match>
<!-- protobuf generated code -->
<Class name="~org\.apache\.hadoop\.ha\.proto\.HAServiceProtocolProtos.*"/>
</Match>
<Match>
<!-- protobuf generated code -->
<Class name="~org\.apache\.hadoop\.ha\.proto\.ZKFCProtocolProtos.*"/>
</Match>
<Match>
<!-- protobuf generated code -->
<Class name="~org\.apache\.hadoop\.security\.proto\.SecurityProtos.*"/>
</Match>
<Match>
<!-- protobuf generated code -->
<Class name="~org\.apache\.hadoop\.ipc\.protobuf\.TestProtos.*"/>
</Match>
<Match>
<!-- protobuf generated code -->
<Class name="~org\.apache\.hadoop\.ipc\.proto\.RefreshCallQueueProtocolProtos.*"/>
</Match>
<Match>
<!-- protobuf generated code -->
<Class name="~org\.apache\.hadoop\.ipc\.proto\.GenericRefreshProtocolProtos.*"/>
</Match>
<!--
Manually checked, misses child thread manually syncing on parent's intrinsic lock.
-->
<Match>
<Class name="org.apache.hadoop.metrics2.lib.MutableQuantiles" />
<Field name="previousSnapshot" />
<Bug pattern="IS2_INCONSISTENT_SYNC" />
</Match>
<!--
The method uses a generic type T that extends two other types
T1 and T2. Findbugs complains of a cast from T1 to T2.
-->
<Match>
<Class name="org.apache.hadoop.fs.DelegationTokenRenewer" />
<Method name="removeRenewAction" />
<Bug pattern="BC_UNCONFIRMED_CAST" />
</Match>
<!-- Inconsistent synchronization flagged by findbugs is not valid. -->
<Match>
<Class name="org.apache.hadoop.ipc.Client$Connection" />
<Field name="in" />
<Bug pattern="IS2_INCONSISTENT_SYNC" />
</Match>
<!--
The switch condition for INITIATE is expected to fallthru to RESPONSE
to process initial sasl response token included in the INITIATE
-->
<Match>
<Class name="org.apache.hadoop.ipc.Server$Connection" />
<Method name="processSaslMessage" />
<Bug pattern="SF_SWITCH_FALLTHROUGH" />
</Match>
<!-- Synchronization performed on util.concurrent instance. -->
<Match>
<Class name="org.apache.hadoop.service.AbstractService" />
<Method name="stop" />
<Bug code="JLM" />
</Match>
<Match>
<Class name="org.apache.hadoop.service.AbstractService" />
<Method name="waitForServiceToStop" />
<Bug code="JLM" />
</Match>
<!--
OpenStack Swift FS module -closes streams in a different method
from where they are opened.
-->
<Match>
<Class name="org.apache.hadoop.fs.swift.snative.SwiftNativeOutputStream"/>
<Method name="uploadFileAttempt"/>
<Bug pattern="OBL_UNSATISFIED_OBLIGATION"/>
</Match>
<Match>
<Class name="org.apache.hadoop.fs.swift.snative.SwiftNativeOutputStream"/>
<Method name="uploadFilePartAttempt"/>
<Bug pattern="OBL_UNSATISFIED_OBLIGATION"/>
</Match>
<!-- code from maven source, null value is checked at callee side. -->
<Match>
<Class name="org.apache.hadoop.util.ComparableVersion$ListItem" />
<Method name="compareTo" />
<Bug code="NP" />
</Match>
<Match>
<Class name="org.apache.hadoop.util.HttpExceptionUtils"/>
<Method name="validateResponse"/>
<Bug pattern="REC_CATCH_EXCEPTION"/>
</Match>
</FindBugsFilter>

View File

@ -0,0 +1,17 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
org.apache.hadoop.fs.s3.S3FileSystem
org.apache.hadoop.fs.s3native.NativeS3FileSystem
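This services entry is how FileSystem.get() keeps resolving the s3 and s3n schemes after the classes moved to hadoop-aws: the FileSystem base class discovers implementations via ServiceLoader and maps each scheme to its class. A sketch of the lookup, assuming hadoop-aws is on the classpath (the bucket and credentials are placeholders):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class SchemeLookupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.s3n.awsAccessKeyId", "ACCESS");     // placeholder credentials
    conf.set("fs.s3n.awsSecretAccessKey", "SECRET");
    // Resolves to NativeS3FileSystem through the registration above.
    FileSystem fs = FileSystem.get(URI.create("s3n://example-bucket/"), conf);
    System.out.println(fs.getClass().getName());
  }
}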

View File

@ -46,15 +46,6 @@ public abstract class S3FileSystemContractBaseTest
super.tearDown();
}
public void testBlockSize() throws Exception {
Path file = path("/test/hadoop/file");
long newBlockSize = fs.getDefaultBlockSize(file) * 2;
fs.getConf().setLong("fs.s3.block.size", newBlockSize);
createFile(file);
assertEquals("Double default block size", newBlockSize,
fs.getFileStatus(file).getBlockSize());
}
public void testCanonicalName() throws Exception {
assertNull("s3 doesn't support security token and shouldn't have canonical name",
fs.getCanonicalServiceName());

View File

@ -83,6 +83,12 @@
<scope>compile</scope>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-aws</artifactId>
<scope>compile</scope>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-azure</artifactId>

View File

@ -67,6 +67,15 @@ Release 2.6.0 - UNRELEASED
YARN-2394. FairScheduler: Configure fairSharePreemptionThreshold per queue.
(Wei Yan via kasha)
YARN-415. Capture aggregate memory allocation at the app-level for chargeback.
(Eric Payne & Andrey Klochkov via jianhe)
YARN-2440. Enabled Nodemanagers to limit the aggregate cpu usage across all
containers to a preconfigured limit. (Varun Vasudev via vinodkv)
YARN-2033. Merging generic-history into the Timeline Store
(Zhijie Shen via junping_du)
IMPROVEMENTS
YARN-2197. Add a link to YARN CHANGES.txt in the left side of doc
@ -196,6 +205,15 @@ Release 2.6.0 - UNRELEASED
YARN-2515. Updated ConverterUtils#toContainerId to parse epoch.
(Tsuyoshi OZAWA via jianhe)
YARN-2448. Changed ApplicationMasterProtocol to expose RM-recognized resource
types to the AMs. (Varun Vasudev via vinodkv)
YARN-2538. Added logs when RM sends roll-overed AMRMToken to AM. (Xuan Gong
via zjshen)
YARN-2229. Changed the integer field of ContainerId to be long type.
(Tsuyoshi OZAWA via jianhe)
OPTIMIZATIONS
BUG FIXES
@ -305,7 +323,24 @@ Release 2.6.0 - UNRELEASED
YARN-2526. SLS can deadlock when all the threads are taken by AMSimulators.
(Wei Yan via kasha)
YARN-1458. FairScheduler: Zero weight can lead to livelock.
(Zhihai Xu via kasha)
YARN-2459. RM crashes if App gets rejected for any reason
and HA is enabled. (Jian He and Mayank Bansal via xgong)
YARN-2158. Fixed TestRMWebServicesAppsModification#testSingleAppKill test
failure. (Varun Vasudev via jianhe)
YARN-2534. FairScheduler: Potential integer overflow calculating totalMaxShare.
(Zhihai Xu via kasha)
YARN-2541. Fixed ResourceManagerRest.apt.vm table syntax error. (jianhe)
YARN-2484. FileSystemRMStateStore#readFile/writeFile should close
FSData(In|Out)putStream in final block (Tsuyoshi OZAWA via jlowe)
Release 2.5.1 - 2014-09-05
INCOMPATIBLE CHANGES

View File

@ -19,6 +19,7 @@
package org.apache.hadoop.yarn.api.protocolrecords;
import java.nio.ByteBuffer;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
@ -31,6 +32,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.NMToken;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes;
import org.apache.hadoop.yarn.util.Records;
/**
@ -180,4 +182,25 @@ public abstract class RegisterApplicationMasterResponse {
@Private
@Unstable
public abstract void setNMTokensFromPreviousAttempts(List<NMToken> nmTokens);
/**
* Get a set of the resource types considered by the scheduler.
*
* @return a Map of RM settings
*/
@Public
@Unstable
public abstract EnumSet<SchedulerResourceTypes> getSchedulerResourceTypes();
/**
* Set the resource types used by the scheduler.
*
* @param types
* a set of the resource types that the scheduler considers during
* scheduling
*/
@Private
@Unstable
public abstract void setSchedulerResourceTypes(
EnumSet<SchedulerResourceTypes> types);
}
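An AM can use the new field at registration time, for example to decide whether vcore asks are meaningful under the current scheduler. A sketch against the AMRMClient API (host, port and tracking URL are placeholders):

import java.util.EnumSet;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes;

public class ResourceTypesSketch {
  static void register(AMRMClient<AMRMClient.ContainerRequest> client)
      throws Exception {
    RegisterApplicationMasterResponse resp =
        client.registerApplicationMaster("am-host", 0, ""); // placeholders
    EnumSet<SchedulerResourceTypes> types = resp.getSchedulerResourceTypes();
    // MEMORY is always enforced; CPU only when the scheduler accounts for vcores.
    System.out.println("CPU enforced: " + types.contains(SchedulerResourceTypes.CPU));
  }
}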

View File

@ -51,14 +51,15 @@ public abstract class ApplicationAttemptReport {
@Unstable
public static ApplicationAttemptReport newInstance(
ApplicationAttemptId applicationAttemptId, String host, int rpcPort,
String url, String oUrl, String diagnostics,
YarnApplicationAttemptState state, ContainerId amContainerId) {
ApplicationAttemptReport report =
Records.newRecord(ApplicationAttemptReport.class);
report.setApplicationAttemptId(applicationAttemptId);
report.setHost(host);
report.setRpcPort(rpcPort);
report.setTrackingUrl(url);
report.setOriginalTrackingUrl(oUrl);
report.setDiagnostics(diagnostics);
report.setYarnApplicationAttemptState(state);
report.setAMContainerId(amContainerId);
@ -135,6 +136,19 @@ public abstract class ApplicationAttemptReport {
@Unstable
public abstract void setTrackingUrl(String url);
/**
* Get the <em>original tracking url</em> for the application attempt.
*
* @return <em>original tracking url</em> for the application attempt
*/
@Public
@Unstable
public abstract String getOriginalTrackingUrl();
@Private
@Unstable
public abstract void setOriginalTrackingUrl(String oUrl);
/**
* Get the <code>ApplicationAttemptId</code> of this attempt of the
* application

View File

@ -35,7 +35,8 @@ public abstract class ApplicationResourceUsageReport {
@Unstable
public static ApplicationResourceUsageReport newInstance(
int numUsedContainers, int numReservedContainers, Resource usedResources,
Resource reservedResources, Resource neededResources, long memorySeconds,
long vcoreSeconds) {
ApplicationResourceUsageReport report =
Records.newRecord(ApplicationResourceUsageReport.class);
report.setNumUsedContainers(numUsedContainers);
@ -43,6 +44,8 @@ public abstract class ApplicationResourceUsageReport {
report.setUsedResources(usedResources);
report.setReservedResources(reservedResources);
report.setNeededResources(neededResources);
report.setMemorySeconds(memorySeconds);
report.setVcoreSeconds(vcoreSeconds);
return report;
}
@ -113,4 +116,40 @@ public abstract class ApplicationResourceUsageReport {
@Private
@Unstable
public abstract void setNeededResources(Resource needed_resources);
/**
* Set the aggregated amount of memory (in megabytes) the application has
* allocated times the number of seconds the application has been running.
* @param memory_seconds the aggregated amount of memory seconds
*/
@Private
@Unstable
public abstract void setMemorySeconds(long memory_seconds);
/**
* Get the aggregated amount of memory (in megabytes) the application has
* allocated times the number of seconds the application has been running.
* @return the aggregated amount of memory seconds
*/
@Public
@Unstable
public abstract long getMemorySeconds();
/**
* Set the aggregated number of vcores that the application has allocated
* times the number of seconds the application has been running.
* @param vcore_seconds the aggregated number of vcore seconds
*/
@Private
@Unstable
public abstract void setVcoreSeconds(long vcore_seconds);
/**
* Get the aggregated number of vcores that the application has allocated
* times the number of seconds the application has been running.
* @return the aggregated number of vcore seconds
*/
@Public
@Unstable
public abstract long getVcoreSeconds();
}
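The units are plain resource-time products: an app holding three 1024 MB / 1 vcore containers for 120 seconds accrues 3 * 1024 * 120 = 368640 memory-seconds and 3 * 1 * 120 = 360 vcore-seconds. A sketch of that accumulation (a hypothetical helper, not the RM's bookkeeping code):

public class UsageAccrualSketch {
  static long memorySeconds = 0;
  static long vcoreSeconds = 0;

  // Called once per running container for each elapsed interval.
  static void accrue(int memoryMb, int vcores, long intervalMillis) {
    memorySeconds += memoryMb * intervalMillis / 1000;
    vcoreSeconds += vcores * intervalMillis / 1000;
  }

  public static void main(String[] args) {
    for (int c = 0; c < 3; c++) {
      accrue(1024, 1, 120000L); // three containers, 120 s each
    }
    System.out.println(memorySeconds + " MB-s, " + vcoreSeconds + " vcore-s");
  }
}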

View File

@ -41,9 +41,9 @@ public abstract class ContainerId implements Comparable<ContainerId>{
@Private
@Unstable
public static ContainerId newInstance(ApplicationAttemptId appAttemptId,
long containerId) {
ContainerId id = Records.newRecord(ContainerId.class);
id.setContainerId(containerId);
id.setApplicationAttemptId(appAttemptId);
id.build();
return id;
@ -74,16 +74,28 @@ public abstract class ContainerId implements Comparable<ContainerId>{
protected abstract void setApplicationAttemptId(ApplicationAttemptId atId);
/**
* Get the lower 32 bits of identifier of the <code>ContainerId</code>,
* which doesn't include epoch. Note that this method will be marked as
* deprecated, so please use <code>getContainerId</code> instead.
* @return lower 32 bits of identifier of the <code>ContainerId</code>
*/
@Public
@Stable
public abstract int getId();
/**
* Get the identifier of the <code>ContainerId</code>. Upper 24 bits are
* reserved as epoch of cluster, and lower 40 bits are reserved as
* sequential number of containers.
* @return identifier of the <code>ContainerId</code>
*/
@Public
@Unstable
public abstract long getContainerId();
@Private
@Unstable
protected abstract void setContainerId(long id);
// TODO: fail the app submission if attempts are more than 10 or something
@ -112,11 +124,9 @@ public abstract class ContainerId implements Comparable<ContainerId>{
@Override @Override
public int hashCode() { public int hashCode() {
// Generated by eclipse. // Generated by IntelliJ IDEA 13.1.
final int prime = 435569; int result = (int) (getContainerId() ^ (getContainerId() >>> 32));
int result = 7507; result = 31 * result + getApplicationAttemptId().hashCode();
result = prime * result + getId();
result = prime * result + getApplicationAttemptId().hashCode();
return result; return result;
} }
@ -131,7 +141,7 @@ public abstract class ContainerId implements Comparable<ContainerId>{
ContainerId other = (ContainerId) obj; ContainerId other = (ContainerId) obj;
if (!this.getApplicationAttemptId().equals(other.getApplicationAttemptId())) if (!this.getApplicationAttemptId().equals(other.getApplicationAttemptId()))
return false; return false;
if (this.getId() != other.getId()) if (this.getContainerId() != other.getContainerId())
return false; return false;
return true; return true;
} }
@ -140,12 +150,12 @@ public abstract class ContainerId implements Comparable<ContainerId>{
public int compareTo(ContainerId other) { public int compareTo(ContainerId other) {
if (this.getApplicationAttemptId().compareTo( if (this.getApplicationAttemptId().compareTo(
other.getApplicationAttemptId()) == 0) { other.getApplicationAttemptId()) == 0) {
return this.getId() - other.getId(); return Long.valueOf(getContainerId())
.compareTo(Long.valueOf(other.getContainerId()));
} else { } else {
return this.getApplicationAttemptId().compareTo( return this.getApplicationAttemptId().compareTo(
other.getApplicationAttemptId()); other.getApplicationAttemptId());
} }
} }
@Override @Override
@ -159,8 +169,8 @@ public abstract class ContainerId implements Comparable<ContainerId>{
sb.append( sb.append(
appAttemptIdAndEpochFormat.get().format( appAttemptIdAndEpochFormat.get().format(
getApplicationAttemptId().getAttemptId())).append("_"); getApplicationAttemptId().getAttemptId())).append("_");
sb.append(containerIdFormat.get().format(0x3fffff & getId())); sb.append(containerIdFormat.get().format(0xffffffffffL & getContainerId()));
int epoch = getId() >> 22; long epoch = getContainerId() >> 40;
if (epoch > 0) { if (epoch > 0) {
sb.append("_").append(appAttemptIdAndEpochFormat.get().format(epoch)); sb.append("_").append(appAttemptIdAndEpochFormat.get().format(epoch));
} }
@ -177,12 +187,12 @@ public abstract class ContainerId implements Comparable<ContainerId>{
} }
try { try {
ApplicationAttemptId appAttemptID = toApplicationAttemptId(it); ApplicationAttemptId appAttemptID = toApplicationAttemptId(it);
int id = Integer.parseInt(it.next()); long id = Long.parseLong(it.next());
int epoch = 0; long epoch = 0;
if (it.hasNext()) { if (it.hasNext()) {
epoch = Integer.parseInt(it.next()); epoch = Integer.parseInt(it.next());
} }
int cid = (epoch << 22) | id; long cid = (epoch << 40) | id;
ContainerId containerId = ContainerId.newInstance(appAttemptID, cid); ContainerId containerId = ContainerId.newInstance(appAttemptID, cid);
return containerId; return containerId;
} catch (NumberFormatException n) { } catch (NumberFormatException n) {
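
The encoding the new javadoc describes is plain bit packing on the widened 64-bit identifier, and toString/fromString round-trip it by appending an _<epoch> suffix only when the epoch is non-zero. A small self-contained sketch of the layout (the masks mirror the constants visible in this hunk; the class itself is hypothetical):

    // Epoch/sequence packing for the 64-bit container id: upper 24 bits hold
    // the cluster epoch, lower 40 bits the container sequence number.
    public final class ContainerIdBits {
      static final long SEQUENCE_MASK = 0xffffffffffL;  // lower 40 bits

      static long pack(long epoch, long sequence) {
        return (epoch << 40) | (sequence & SEQUENCE_MASK);
      }

      static long epochOf(long containerId) {
        return containerId >> 40;               // matches getContainerId() >> 40
      }

      static long sequenceOf(long containerId) {
        return containerId & SEQUENCE_MASK;     // matches the toString() mask
      }

      public static void main(String[] args) {
        long cid = pack(2, 5);
        System.out.println(epochOf(cid) + " " + sequenceOf(cid));  // prints "2 5"
      }
    }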

View File

@ -316,6 +316,19 @@ public class YarnConfiguration extends Configuration {
public static final int DEFAULT_RM_HISTORY_WRITER_MULTI_THREADED_DISPATCHER_POOL_SIZE = public static final int DEFAULT_RM_HISTORY_WRITER_MULTI_THREADED_DISPATCHER_POOL_SIZE =
10; 10;
/**
* The setting that controls whether the RM publishes YARN system metrics to
* the timeline server.
*/
public static final String RM_SYSTEM_METRICS_PUBLISHER_ENABLED =
RM_PREFIX + "system-metrics-publisher.enabled";
public static final boolean DEFAULT_RM_SYSTEM_METRICS_PUBLISHER_ENABLED = false;
public static final String RM_SYSTEM_METRICS_PUBLISHER_DISPATCHER_POOL_SIZE =
RM_PREFIX + "system-metrics-publisher.dispatcher.pool-size";
public static final int DEFAULT_RM_SYSTEM_METRICS_PUBLISHER_DISPATCHER_POOL_SIZE =
10;
//Delegation token related keys //Delegation token related keys
public static final String DELEGATION_KEY_UPDATE_INTERVAL_KEY = public static final String DELEGATION_KEY_UPDATE_INTERVAL_KEY =
RM_PREFIX + "delegation.key.update-interval"; RM_PREFIX + "delegation.key.update-interval";
@ -724,6 +737,12 @@ public class YarnConfiguration extends Configuration {
public static final String NM_VCORES = NM_PREFIX + "resource.cpu-vcores"; public static final String NM_VCORES = NM_PREFIX + "resource.cpu-vcores";
public static final int DEFAULT_NM_VCORES = 8; public static final int DEFAULT_NM_VCORES = 8;
/** Percentage of overall CPU which can be allocated for containers. */
public static final String NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT =
NM_PREFIX + "resource.percentage-physical-cpu-limit";
public static final int DEFAULT_NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT =
100;
/** NM Webapp address.**/ /** NM Webapp address.**/
public static final String NM_WEBAPP_ADDRESS = NM_PREFIX + "webapp.address"; public static final String NM_WEBAPP_ADDRESS = NM_PREFIX + "webapp.address";
public static final int DEFAULT_NM_WEBAPP_PORT = 8042; public static final int DEFAULT_NM_WEBAPP_PORT = 8042;
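
Both additions follow the existing key/default pattern, so callers read them through the standard Configuration accessors. A minimal usage sketch (class name hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class NewKeysSample {
      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        // Whether the RM publishes system metrics to the timeline server
        // (disabled by default).
        boolean publish = conf.getBoolean(
            YarnConfiguration.RM_SYSTEM_METRICS_PUBLISHER_ENABLED,
            YarnConfiguration.DEFAULT_RM_SYSTEM_METRICS_PUBLISHER_ENABLED);
        // Share of physical CPU the NM may hand to containers (100% by default).
        int cpuLimit = conf.getInt(
            YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT,
            YarnConfiguration.DEFAULT_NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT);
        System.out.println("publish=" + publish + ", cpuLimit=" + cpuLimit + "%");
      }
    }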

View File

@ -50,7 +50,7 @@ message ApplicationAttemptIdProto {
message ContainerIdProto { message ContainerIdProto {
optional ApplicationIdProto app_id = 1; optional ApplicationIdProto app_id = 1;
optional ApplicationAttemptIdProto app_attempt_id = 2; optional ApplicationAttemptIdProto app_attempt_id = 2;
optional int32 id = 3; optional int64 id = 3;
} }
message ResourceProto { message ResourceProto {
@ -167,6 +167,8 @@ message ApplicationResourceUsageReportProto {
optional ResourceProto used_resources = 3; optional ResourceProto used_resources = 3;
optional ResourceProto reserved_resources = 4; optional ResourceProto reserved_resources = 4;
optional ResourceProto needed_resources = 5; optional ResourceProto needed_resources = 5;
optional int64 memory_seconds = 6;
optional int64 vcore_seconds = 7;
} }
message ApplicationReportProto { message ApplicationReportProto {
@ -200,6 +202,7 @@ message ApplicationAttemptReportProto {
optional string diagnostics = 5 [default = "N/A"]; optional string diagnostics = 5 [default = "N/A"];
optional YarnApplicationAttemptStateProto yarn_application_attempt_state = 6; optional YarnApplicationAttemptStateProto yarn_application_attempt_state = 6;
optional ContainerIdProto am_container_id = 7; optional ContainerIdProto am_container_id = 7;
optional string original_tracking_url = 8;
} }
enum NodeStateProto { enum NodeStateProto {

View File

@ -47,6 +47,7 @@ message RegisterApplicationMasterResponseProto {
repeated ContainerProto containers_from_previous_attempts = 4; repeated ContainerProto containers_from_previous_attempts = 4;
optional string queue = 5; optional string queue = 5;
repeated NMTokenProto nm_tokens_from_previous_attempts = 6; repeated NMTokenProto nm_tokens_from_previous_attempts = 6;
repeated SchedulerResourceTypes scheduler_resource_types = 7;
} }
message FinishApplicationMasterRequestProto { message FinishApplicationMasterRequestProto {
@ -88,6 +89,11 @@ message AllocateResponseProto {
optional hadoop.common.TokenProto am_rm_token = 12; optional hadoop.common.TokenProto am_rm_token = 12;
} }
enum SchedulerResourceTypes {
MEMORY = 0;
CPU = 1;
}
////////////////////////////////////////////////////// //////////////////////////////////////////////////////
/////// client_RM_Protocol /////////////////////////// /////// client_RM_Protocol ///////////////////////////
////////////////////////////////////////////////////// //////////////////////////////////////////////////////
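
The repeated enum field tells a registering ApplicationMaster which resource dimensions the scheduler actually considers. Assuming the records-layer response mirrors the proto with a getSchedulerResourceTypes() accessor returning an EnumSet (accessor name inferred from the field, not confirmed by this hunk), an AM-side check might look like:

    import java.util.EnumSet;
    import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
    import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes;

    public final class SchedulerTypesCheck {
      // Hypothetical helper: accessor name assumed from the proto field above.
      static boolean cpuIsScheduled(RegisterApplicationMasterResponse response) {
        EnumSet<SchedulerResourceTypes> types = response.getSchedulerResourceTypes();
        // If only MEMORY is present, vcore requests are effectively ignored.
        return types.contains(SchedulerResourceTypes.CPU);
      }
    }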

View File

@ -38,6 +38,7 @@ import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
import org.apache.hadoop.yarn.api.records.ContainerReport; import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
@ -460,6 +461,11 @@ public class ApplicationCLI extends YarnCLI {
appReportStr.println(appReport.getRpcPort()); appReportStr.println(appReport.getRpcPort());
appReportStr.print("\tAM Host : "); appReportStr.print("\tAM Host : ");
appReportStr.println(appReport.getHost()); appReportStr.println(appReport.getHost());
appReportStr.print("\tAggregate Resource Allocation : ");
ApplicationResourceUsageReport usageReport = appReport.getApplicationResourceUsageReport();
appReportStr.print(usageReport.getMemorySeconds() + " MB-seconds, ");
appReportStr.println(usageReport.getVcoreSeconds() + " vcore-seconds");
appReportStr.print("\tDiagnostics : "); appReportStr.print("\tDiagnostics : ");
appReportStr.print(appReport.getDiagnostics()); appReportStr.print(appReport.getDiagnostics());
} else { } else {
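
With these lines in place, the status report gains one line between the AM host and the diagnostics. Sample output for yarn application -status, using the values from the updated TestYarnCLI expectation further down:

    AM Host : host
    Aggregate Resource Allocation : 123456 MB-seconds, 4567 vcore-seconds
    Diagnostics : diagnostics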

View File

@ -676,7 +676,7 @@ public abstract class ProtocolHATestBase extends ClientBaseWithFixes{
public ApplicationAttemptReport createFakeApplicationAttemptReport() { public ApplicationAttemptReport createFakeApplicationAttemptReport() {
return ApplicationAttemptReport.newInstance( return ApplicationAttemptReport.newInstance(
createFakeApplicationAttemptId(), "localhost", 0, "", "", createFakeApplicationAttemptId(), "localhost", 0, "", "", "",
YarnApplicationAttemptState.RUNNING, createFakeContainerId()); YarnApplicationAttemptState.RUNNING, createFakeContainerId());
} }
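
The extra empty string threads a new original-tracking-URL argument into the factory; the same call-site change repeats through the tests below. From those call sites, the assumed parameter order is (names inferred for illustration, not authoritative):

    // Hypothetical reconstruction of the widened factory signature:
    ApplicationAttemptReport report = ApplicationAttemptReport.newInstance(
        attemptId,             // ApplicationAttemptId
        "host",                // host running the AM
        124,                   // RPC port
        "url",                 // tracking URL
        "oUrl",                // original tracking URL -- the new argument
        "diagnostics",         // diagnostics message
        YarnApplicationAttemptState.FINISHED,
        ContainerId.newInstance(attemptId, 1));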

View File

@ -346,6 +346,7 @@ public class TestAHSClient {
"host", "host",
124, 124,
"url", "url",
"oUrl",
"diagnostics", "diagnostics",
YarnApplicationAttemptState.FINISHED, YarnApplicationAttemptState.FINISHED,
ContainerId.newInstance( ContainerId.newInstance(
@ -357,6 +358,7 @@ public class TestAHSClient {
"host", "host",
124, 124,
"url", "url",
"oUrl",
"diagnostics", "diagnostics",
YarnApplicationAttemptState.FINISHED, YarnApplicationAttemptState.FINISHED,
ContainerId.newInstance( ContainerId.newInstance(

View File

@ -457,6 +457,7 @@ public class TestYarnClient {
"host", "host",
124, 124,
"url", "url",
"oUrl",
"diagnostics", "diagnostics",
YarnApplicationAttemptState.FINISHED, YarnApplicationAttemptState.FINISHED,
ContainerId.newInstance( ContainerId.newInstance(
@ -467,6 +468,7 @@ public class TestYarnClient {
"host", "host",
124, 124,
"url", "url",
"oUrl",
"diagnostics", "diagnostics",
YarnApplicationAttemptState.FINISHED, YarnApplicationAttemptState.FINISHED,
ContainerId.newInstance( ContainerId.newInstance(

View File

@ -46,6 +46,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerReport; import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.ContainerState;
@ -87,11 +88,15 @@ public class TestYarnCLI {
public void testGetApplicationReport() throws Exception { public void testGetApplicationReport() throws Exception {
ApplicationCLI cli = createAndGetAppCLI(); ApplicationCLI cli = createAndGetAppCLI();
ApplicationId applicationId = ApplicationId.newInstance(1234, 5); ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
ApplicationResourceUsageReport usageReport =
ApplicationResourceUsageReport.newInstance(
2, 0, null, null, null, 123456, 4567);
ApplicationReport newApplicationReport = ApplicationReport.newInstance( ApplicationReport newApplicationReport = ApplicationReport.newInstance(
applicationId, ApplicationAttemptId.newInstance(applicationId, 1), applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
"user", "queue", "appname", "host", 124, null, "user", "queue", "appname", "host", 124, null,
YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0, YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null); FinalApplicationStatus.SUCCEEDED, usageReport, "N/A", 0.53789f, "YARN",
null);
when(client.getApplicationReport(any(ApplicationId.class))).thenReturn( when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
newApplicationReport); newApplicationReport);
int result = cli.run(new String[] { "application", "-status", applicationId.toString() }); int result = cli.run(new String[] { "application", "-status", applicationId.toString() });
@ -113,6 +118,7 @@ public class TestYarnCLI {
pw.println("\tTracking-URL : N/A"); pw.println("\tTracking-URL : N/A");
pw.println("\tRPC Port : 124"); pw.println("\tRPC Port : 124");
pw.println("\tAM Host : host"); pw.println("\tAM Host : host");
pw.println("\tAggregate Resource Allocation : 123456 MB-seconds, 4567 vcore-seconds");
pw.println("\tDiagnostics : diagnostics"); pw.println("\tDiagnostics : diagnostics");
pw.close(); pw.close();
String appReportStr = baos.toString("UTF-8"); String appReportStr = baos.toString("UTF-8");
@ -127,7 +133,7 @@ public class TestYarnCLI {
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance( ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
applicationId, 1); applicationId, 1);
ApplicationAttemptReport attemptReport = ApplicationAttemptReport ApplicationAttemptReport attemptReport = ApplicationAttemptReport
.newInstance(attemptId, "host", 124, "url", "diagnostics", .newInstance(attemptId, "host", 124, "url", "oUrl", "diagnostics",
YarnApplicationAttemptState.FINISHED, ContainerId.newInstance( YarnApplicationAttemptState.FINISHED, ContainerId.newInstance(
attemptId, 1)); attemptId, 1));
when( when(
@ -163,11 +169,11 @@ public class TestYarnCLI {
ApplicationAttemptId attemptId1 = ApplicationAttemptId.newInstance( ApplicationAttemptId attemptId1 = ApplicationAttemptId.newInstance(
applicationId, 2); applicationId, 2);
ApplicationAttemptReport attemptReport = ApplicationAttemptReport ApplicationAttemptReport attemptReport = ApplicationAttemptReport
.newInstance(attemptId, "host", 124, "url", "diagnostics", .newInstance(attemptId, "host", 124, "url", "oUrl", "diagnostics",
YarnApplicationAttemptState.FINISHED, ContainerId.newInstance( YarnApplicationAttemptState.FINISHED, ContainerId.newInstance(
attemptId, 1)); attemptId, 1));
ApplicationAttemptReport attemptReport1 = ApplicationAttemptReport ApplicationAttemptReport attemptReport1 = ApplicationAttemptReport
.newInstance(attemptId1, "host", 124, "url", "diagnostics", .newInstance(attemptId1, "host", 124, "url", "oUrl", "diagnostics",
YarnApplicationAttemptState.FINISHED, ContainerId.newInstance( YarnApplicationAttemptState.FINISHED, ContainerId.newInstance(
attemptId1, 1)); attemptId1, 1));
List<ApplicationAttemptReport> reports = new ArrayList<ApplicationAttemptReport>(); List<ApplicationAttemptReport> reports = new ArrayList<ApplicationAttemptReport>();

Some files were not shown because too many files have changed in this diff.